GB_AxB_saxpy3_symbolic.c
//------------------------------------------------------------------------------
// GB_AxB_saxpy3_symbolic: symbolic analysis for GB_AxB_saxpy3
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Symbolic analysis for C=A*B, C<M>=A*B or C<!M>=A*B, via GB_AxB_saxpy3.
// Coarse tasks compute nnz (C (:,j)) for each of their vectors j. Fine tasks
// just scatter the mask M into the hash table. This phase does not depend on
// the semiring, nor does it depend on the type of C, A, or B. It does access
// the values of M, if the mask matrix M is present and not structural.
#include "GB_AxB_saxpy3.h"
#include "GB_AxB_saxpy3_template.h"
#include "GB_atomics.h"
#include "GB_bracket.h"
// GB_GET_A_k and GB_GET_M_j declare aknz and mjnz, but these are unused here.
#include "GB_unused.h"
void GB_AxB_saxpy3_symbolic
(
GrB_Matrix C, // Cp [k] is computed for coarse tasks
const GrB_Matrix M, // mask matrix M
bool Mask_comp, // M complemented, or not
bool Mask_struct, // M structural, or not
const GrB_Matrix A, // A matrix; only the pattern is accessed
const GrB_Matrix B, // B matrix; only the pattern is accessed
GB_saxpy3task_struct *TaskList, // list of tasks, and workspace
int ntasks, // total number of tasks
int nfine, // number of fine tasks
int nthreads // number of threads
)
{
//--------------------------------------------------------------------------
// get M, A, B, and C
//--------------------------------------------------------------------------
int64_t *GB_RESTRICT Cp = C->p ;
// const int64_t *GB_RESTRICT Ch = C->h ;
const int64_t cvlen = C->vlen ;
// const int64_t cnvec = C->nvec ;
const int64_t *GB_RESTRICT Bp = B->p ;
const int64_t *GB_RESTRICT Bh = B->h ;
const int64_t *GB_RESTRICT Bi = B->i ;
// const GB_BTYPE *GB_RESTRICT Bx = B_is_pattern ? NULL : B->x ;
// const int64_t bvlen = B->vlen ;
// const int64_t bnvec = B->nvec ;
// const bool B_is_hyper = B->is_hyper ;
const int64_t *GB_RESTRICT Ap = A->p ;
const int64_t *GB_RESTRICT Ah = A->h ;
const int64_t *GB_RESTRICT Ai = A->i ;
const int64_t anvec = A->nvec ;
const bool A_is_hyper = GB_IS_HYPER (A) ;
// const GB_ATYPE *GB_RESTRICT Ax = A_is_pattern ? NULL : A->x ;
const int64_t *GB_RESTRICT Mp = NULL ;
const int64_t *GB_RESTRICT Mh = NULL ;
const int64_t *GB_RESTRICT Mi = NULL ;
const GB_void *GB_RESTRICT Mx = NULL ;
size_t msize = 0 ;
int64_t mnvec = 0 ;
bool M_is_hyper = false ;
if (M != NULL)
{
Mp = M->p ;
Mh = M->h ;
Mi = M->i ;
Mx = (Mask_struct ? NULL : (M->x)) ;
msize = M->type->size ;
mnvec = M->nvec ;
M_is_hyper = M->is_hyper ;
}
// 3 cases:
// M not present and Mask_comp false: compute C=A*B
// M present and Mask_comp false: compute C<M>=A*B
// M present and Mask_comp true : compute C<!M>=A*B
// If M is NULL on input, then Mask_comp is also false on input.
bool mask_is_M = (M != NULL && !Mask_comp) ;
//==========================================================================
// phase1: count nnz(C(:,j)) for coarse tasks, scatter M for fine tasks
//==========================================================================
// At this point, all of Hf [...] is zero, for all tasks.
// Hi and Hx are not initialized.
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
int64_t hash_size = TaskList [taskid].hsize ;
bool use_Gustavson = (hash_size == cvlen) ;
if (taskid < nfine)
{
//------------------------------------------------------------------
// no work for fine tasks in phase1 if M is not present
//------------------------------------------------------------------
if (M == NULL) continue ;
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
int64_t kk = TaskList [taskid].vector ;
// partition M(:,j)
GB_GET_M_j ; // get M(:,j)
int team_size = TaskList [taskid].team_size ;
int master = TaskList [taskid].master ;
int my_teamid = taskid - master ;
int64_t mystart, myend ;
GB_PARTITION (mystart, myend, mjnz, my_teamid, team_size) ;
mystart += pM_start ;
myend += pM_start ;
if (use_Gustavson)
{
//--------------------------------------------------------------
// phase1: fine Gustavson task, C<M>=A*B or C<!M>=A*B
//--------------------------------------------------------------
                // Scatter the values of M(:,j) into Hf.  No atomics needed
                // since all indices i in M(:,j) are unique.
int8_t *GB_RESTRICT
Hf = (int8_t *GB_RESTRICT) TaskList [taskid].Hf ;
GB_SCATTER_M_j (mystart, myend, 1) ;
}
else
{
//--------------------------------------------------------------
// phase1: fine hash task, C<M>=A*B or C<!M>=A*B
//--------------------------------------------------------------
                // The least significant 2 bits of Hf [hash] are the flag f, and
// the upper bits contain h, as (h,f). After this phase1, if
// M(i,j)=1 then the hash table contains ((i+1),1) in Hf [hash]
// at some location.
// Later, the flag values of f = 2 and 3 are also used.
// Only f=1 is set in this phase.
// h == 0, f == 0: unoccupied and unlocked
// h == i+1, f == 1: occupied with M(i,j)=1
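                // Worked example (illustrative): for row index i = 5 the
                // packed entry is i_mine = ((5+1) << 2) + 1 = 25; decoding
                // gives h = (25 >> 2) = 6 = i+1 and f = (25 & 3) = 1.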
int64_t *GB_RESTRICT
Hf = (int64_t *GB_RESTRICT) TaskList [taskid].Hf ;
int64_t hash_bits = (hash_size-1) ;
for (int64_t pM = mystart ; pM < myend ; pM++) // scan my M(:,j)
{
GB_GET_M_ij ; // get M(i,j)
if (!mij) continue ; // skip if M(i,j)=0
int64_t i = Mi [pM] ;
int64_t i_mine = ((i+1) << 2) + 1 ; // ((i+1),1)
for (GB_HASH (i))
{
int64_t hf ;
// swap my hash entry into the hash table;
// does the following using an atomic capture:
// { hf = Hf [hash] ; Hf [hash] = i_mine ; }
GB_ATOMIC_CAPTURE_INT64 (hf, Hf [hash], i_mine) ;
if (hf == 0) break ; // success
// i_mine has been inserted, but a prior entry was
// already there. It needs to be replaced, so take
// ownership of this displaced entry, and keep
// looking until a new empty slot is found for it.
i_mine = hf ;
}
}
}
}
else
{
//------------------------------------------------------------------
// coarse tasks: compute nnz in each vector of A*B(:,kfirst:klast)
//------------------------------------------------------------------
int64_t *GB_RESTRICT
Hf = (int64_t *GB_RESTRICT) TaskList [taskid].Hf ;
int64_t kfirst = TaskList [taskid].start ;
int64_t klast = TaskList [taskid].end ;
int64_t mark = 0 ;
// int64_t nk = klast - kfirst + 1 ;
if (use_Gustavson)
{
//--------------------------------------------------------------
// phase1: coarse Gustavson task
//--------------------------------------------------------------
if (M == NULL)
{
//----------------------------------------------------------
// phase1: coarse Gustavson task, C=A*B
//----------------------------------------------------------
// Initially, Hf [...] < mark for all Hf.
// Hf [i] is set to mark when C(i,j) is found.
for (int64_t kk = kfirst ; kk <= klast ; kk++)
{
GB_GET_B_j ; // get B(:,j)
if (bjnz == 0)
{
Cp [kk] = 0 ;
continue ;
}
if (bjnz == 1)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
Cp [kk] = aknz ; // nnz(C(:,j)) = nnz(A(:,k))
continue ;
}
mark++ ;
int64_t cjnz = 0 ;
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
if (aknz == cvlen)
{
cjnz = cvlen ; // A(:,k) is dense
break ; // so nnz(C(:,j)) = cvlen
}
// scan A(:,k)
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ; // get A(i,k)
if (Hf [i] != mark) // if true, i is new
{
Hf [i] = mark ; // mark C(i,j) as seen
cjnz++ ; // C(i,j) is a new entry
}
}
}
Cp [kk] = cjnz ; // count the entries in C(:,j)
}
}
else if (mask_is_M)
{
//----------------------------------------------------------
// phase1: coarse Gustavson task, C<M>=A*B
//----------------------------------------------------------
// Initially, Hf [...] < mark for all of Hf.
// Hf [i] < mark : M(i,j)=0, C(i,j) is ignored.
// Hf [i] == mark : M(i,j)=1, and C(i,j) not yet seen.
// Hf [i] == mark+1 : M(i,j)=1, and C(i,j) has been seen.
for (int64_t kk = kfirst ; kk <= klast ; kk++)
{
GB_GET_B_j ; // get B(:,j)
if (bjnz == 0)
{
Cp [kk] = 0 ;
continue ;
}
GB_GET_M_j ; // get M(:,j)
if (mjnz == 0)
{
Cp [kk] = 0 ;
continue ;
}
GB_GET_M_j_RANGE (64) ; // get first and last in M(:,j)
mark += 2 ;
int64_t mark1 = mark+1 ;
// scatter M(:,j)
GB_SCATTER_M_j (pM_start, pM_end, mark) ;
int64_t cjnz = 0 ;
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
GB_SKIP_IF_A_k_DISJOINT_WITH_M_j ;
#define GB_IKJ_VECTORIZE GB_PRAGMA_VECTORIZE
#define GB_IKJ_IVDEP GB_PRAGMA_IVDEP
#define GB_IKJ \
{ \
if (Hf [i] == mark) /* if true, M(i,j) is 1*/\
{ \
Hf [i] = mark1 ; /* mark C(i,j) as seen */\
cjnz++ ; /* C(i,j) is new */ \
} \
}
GB_SCAN_M_j_OR_A_k ;
#undef GB_IKJ_VECTORIZE
#undef GB_IKJ_IVDEP
#undef GB_IKJ
}
Cp [kk] = cjnz ; // count the entries in C(:,j)
}
}
else
{
//----------------------------------------------------------
// phase1: coarse Gustavson task, C<!M>=A*B
//----------------------------------------------------------
// Initially, Hf [...] < mark for all of Hf.
// Hf [i] < mark : M(i,j)=0, C(i,j) is not yet seen.
// Hf [i] == mark : M(i,j)=1, so C(i,j) is ignored.
// Hf [i] == mark+1 : M(i,j)=0, and C(i,j) has been seen.
for (int64_t kk = kfirst ; kk <= klast ; kk++)
{
GB_GET_B_j ; // get B(:,j)
if (bjnz == 0)
{
Cp [kk] = 0 ;
continue ;
}
GB_GET_M_j ; // get M(:,j)
mark += 2 ;
int64_t mark1 = mark+1 ;
// scatter M(:,j)
GB_SCATTER_M_j (pM_start, pM_end, mark) ;
int64_t cjnz = 0 ;
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
// scan A(:,k)
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ; // get A(i,k)
if (Hf [i] < mark) // if true, M(i,j) is 0
{
Hf [i] = mark1 ; // mark C(i,j) as seen
cjnz++ ; // C(i,j) is a new entry
}
}
}
Cp [kk] = cjnz ; // count the entries in C(:,j)
}
}
}
else
{
//--------------------------------------------------------------
// phase1: coarse hash task
//--------------------------------------------------------------
int64_t *GB_RESTRICT Hi = TaskList [taskid].Hi ;
int64_t hash_bits = (hash_size-1) ;
if (M == NULL)
{
//----------------------------------------------------------
// phase1: coarse hash task, C=A*B
//----------------------------------------------------------
// Initially, Hf [...] < mark for all of Hf.
                    // Let f = Hf [hash] and h = Hi [hash].
// f < mark : unoccupied.
// h == i, f == mark : occupied with C(i,j)
for (int64_t kk = kfirst ; kk <= klast ; kk++)
{
GB_GET_B_j ; // get B(:,j)
if (bjnz == 0)
{
Cp [kk] = 0 ; continue ;
}
if (bjnz == 1)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
Cp [kk] = aknz ; // nnz(C(:,j)) = nnz(A(:,k))
continue ;
}
mark++ ;
int64_t cjnz = 0 ;
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
// scan A(:,k)
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ; // get A(i,k)
for (GB_HASH (i)) // find i in hash
{
if (Hf [hash] < mark)
{
Hf [hash] = mark ; // insert C(i,j)
Hi [hash] = i ;
cjnz++ ; // C(i,j) is a new entry.
break ;
}
if (Hi [hash] == i) break ;
}
}
}
Cp [kk] = cjnz ; // count the entries in C(:,j)
}
}
else if (mask_is_M)
{
//----------------------------------------------------------
                    // phase1: coarse hash task, C<M>=A*B
//----------------------------------------------------------
// Initially, Hf [...] < mark for all of Hf.
// Let h = Hi [hash] and f = Hf [hash].
// f < mark: unoccupied, M(i,j)=0, C(i,j) ignored if
// this case occurs while scanning A(:,k)
// h == i, f == mark : M(i,j)=1, and C(i,j) not yet seen.
// h == i, f == mark+1 : M(i,j)=1, and C(i,j) has been seen.
for (int64_t kk = kfirst ; kk <= klast ; kk++)
{
GB_GET_B_j ; // get B(:,j)
if (bjnz == 0)
{
Cp [kk] = 0 ;
continue ;
}
GB_GET_M_j ; // get M(:,j)
if (mjnz == 0)
{
Cp [kk] = 0 ;
continue ;
}
GB_GET_M_j_RANGE (64) ; // get first and last in M(:,j)
mark += 2 ;
int64_t mark1 = mark+1 ;
GB_HASH_M_j ; // hash M(:,j)
int64_t cjnz = 0 ;
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
GB_SKIP_IF_A_k_DISJOINT_WITH_M_j ;
#define GB_IKJ_VECTORIZE
#define GB_IKJ_IVDEP
#define GB_IKJ \
{ \
for (GB_HASH (i)) /* find i in hash */ \
{ \
int64_t f = Hf [hash] ; \
if (f < mark) break ; /* M(i,j)=0; ignore*/\
if (Hi [hash] == i) /* if true, i found*/\
{ \
if (f == mark) /* if true, i is new */\
{ \
Hf [hash] = mark1 ; /* mark seen */\
cjnz++ ; /* C(i,j) is new */ \
} \
break ; \
} \
} \
}
GB_SCAN_M_j_OR_A_k ;
#undef GB_IKJ_VECTORIZE
#undef GB_IKJ_IVDEP
#undef GB_IKJ
}
Cp [kk] = cjnz ; // count the entries in C(:,j)
}
}
else
{
//----------------------------------------------------------
// phase1: coarse hash task, C<!M>=A*B
//----------------------------------------------------------
// Initially, Hf [...] < mark for all of Hf.
// Let h = Hi [hash] and f = Hf [hash].
// f < mark: unoccupied, M(i,j)=0, and C(i,j) not yet seen.
// h == i, f == mark : M(i,j)=1. C(i,j) ignored.
// h == i, f == mark+1 : M(i,j)=0, and C(i,j) has been seen.
for (int64_t kk = kfirst ; kk <= klast ; kk++)
{
GB_GET_B_j ; // get B(:,j)
if (bjnz == 0)
{
Cp [kk] = 0 ;
continue ;
}
GB_GET_M_j ; // get M(:,j)
mark += 2 ;
int64_t mark1 = mark+1 ;
GB_HASH_M_j ; // hash M(:,j)
int64_t cjnz = 0 ;
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
// scan A(:,k)
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ; // get A(i,k)
for (GB_HASH (i)) // find i in hash
{
if (Hf [hash] < mark) // if true, i is new
{
Hf [hash] = mark1 ; // mark C(i,j) seen
Hi [hash] = i ;
cjnz++ ; // C(i,j) is a new entry
break ;
}
if (Hi [hash] == i) break ;
}
}
}
Cp [kk] = cjnz ; // count the entries in C(:,j)
}
}
}
}
}
//--------------------------------------------------------------------------
// check result for phase1 for fine tasks
//--------------------------------------------------------------------------
#ifdef GB_DEBUG
if (M != NULL)
{
for (taskid = 0 ; taskid < nfine ; taskid++)
{
int64_t kk = TaskList [taskid].vector ;
ASSERT (kk >= 0 && kk < B->nvec) ;
int64_t hash_size = TaskList [taskid].hsize ;
bool use_Gustavson = (hash_size == cvlen) ;
int master = TaskList [taskid].master ;
if (master != taskid) continue ;
GB_GET_M_j ; // get M(:,j)
int64_t mjcount2 = 0 ;
int64_t mjcount = 0 ;
for (int64_t pM = pM_start ; pM < pM_end ; pM++)
{
GB_GET_M_ij ; // get M(i,j)
if (mij) mjcount++ ;
}
if (use_Gustavson)
{
// phase1: fine Gustavson task, C<M>=A*B or C<!M>=A*B
int8_t *GB_RESTRICT
Hf = (int8_t *GB_RESTRICT) TaskList [taskid].Hf ;
for (int64_t pM = pM_start ; pM < pM_end ; pM++)
{
GB_GET_M_ij ; // get M(i,j)
ASSERT (Hf [Mi [pM]] == mij) ;
}
for (int64_t i = 0 ; i < cvlen ; i++)
{
ASSERT (Hf [i] == 0 || Hf [i] == 1) ;
if (Hf [i] == 1) mjcount2++ ;
}
ASSERT (mjcount == mjcount2) ;
}
else
{
// phase1: fine hash task, C<M>=A*B or C<!M>=A*B
// h == 0, f == 0: unoccupied and unlocked
// h == i+1, f == 1: occupied with M(i,j)=1
int64_t *GB_RESTRICT
Hf = (int64_t *GB_RESTRICT) TaskList [taskid].Hf ;
int64_t hash_bits = (hash_size-1) ;
for (int64_t pM = pM_start ; pM < pM_end ; pM++)
{
GB_GET_M_ij ; // get M(i,j)
if (!mij) continue ; // skip if M(i,j)=0
int64_t i = Mi [pM] ;
int64_t i_mine = ((i+1) << 2) + 1 ; // ((i+1),1)
int64_t probe = 0 ;
for (GB_HASH (i))
{
int64_t hf = Hf [hash] ;
if (hf == i_mine)
{
mjcount2++ ;
break ;
}
ASSERT (hf != 0) ;
probe++ ;
ASSERT (probe < cvlen) ;
}
}
ASSERT (mjcount == mjcount2) ;
mjcount2 = 0 ;
for (int64_t hash = 0 ; hash < hash_size ; hash++)
{
int64_t hf = Hf [hash] ;
                int64_t h = (hf >> 2) ;     // empty (0), or 1-based index i+1
int64_t f = (hf & 3) ; // 0 if empty or 1 if occupied
if (f == 1) ASSERT (h >= 1 && h <= cvlen) ;
ASSERT (hf == 0 || f == 1) ;
if (f == 1) mjcount2++ ;
}
ASSERT (mjcount == mjcount2) ;
}
}
}
#endif
}
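
A minimal self-contained sketch (illustrative toy matrices, not from the library) of the mark-based Gustavson counting used by the coarse tasks above: Hf records the mark of the last column that touched each row, so incrementing mark starts a fresh "seen" set for every column of C without ever clearing Hf.

#include <cstdint>
#include <cstdio>
#include <vector>

// Count nnz (C (:,j)) for C = A*B with A and B stored as CSC patterns.
int main (void)
{
    // A is 4-by-3: A(:,0) = {0,2}, A(:,1) = {1}, A(:,2) = {2,3}
    std::vector<int64_t> Ap = {0, 2, 3, 5} ;
    std::vector<int64_t> Ai = {0, 2, 1, 2, 3} ;
    // B is 3-by-2: B(:,0) = {0,2}, B(:,1) = {1,2}
    std::vector<int64_t> Bp = {0, 2, 4} ;
    std::vector<int64_t> Bi = {0, 2, 1, 2} ;
    std::vector<int64_t> Hf (4, 0) ;    // cvlen = 4 rows; all below any mark
    int64_t mark = 0 ;
    for (int64_t j = 0 ; j < 2 ; j++)
    {
        mark++ ;                        // new mark: Hf is logically clear
        int64_t cjnz = 0 ;
        for (int64_t pB = Bp [j] ; pB < Bp [j+1] ; pB++)
        {
            int64_t k = Bi [pB] ;       // B(k,j) is present
            for (int64_t pA = Ap [k] ; pA < Ap [k+1] ; pA++)
            {
                int64_t i = Ai [pA] ;   // A(i,k) is present
                if (Hf [i] != mark)
                {
                    Hf [i] = mark ;     // first time row i appears in C(:,j)
                    cjnz++ ;
                }
            }
        }
        printf ("nnz (C (:,%d)) = %d\n", (int) j, (int) cjnz) ;  // 3 and 3
    }
    return (0) ;
}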
keygen.h
/*
* File: keygen.h
* Author: t35brown
*
* Created on August 5, 2019, 7:24 PM
*/
#ifndef KEYGEN_H
#define KEYGEN_H
#include <algorithm>
#include <cassert>
#include <cmath>     // pow, log, exp, floor
#include <cstdlib>   // exit
#include <iostream>  // std::cout
#include <limits>    // std::numeric_limits
#include "plaf.h"
template <typename K>
class KeyGeneratorUniform {
private:
PAD;
Random64 * rng;
int maxKey;
PAD;
public:
KeyGeneratorUniform(Random64 * _rng, int _maxKey) : rng(_rng), maxKey(_maxKey) {}
K next() {
auto result = 1+rng->next(maxKey);
assert((result >= 1) && (result <= maxKey));
// GSTATS_ADD_IX(tid, key_gen_histogram, 1, result);
return result;
}
};
class KeyGeneratorZipfData {
public:
PAD;
int maxKey;
double c = 0; // Normalization constant
double* sum_probs; // Pre-calculated sum of probabilities
PAD;
KeyGeneratorZipfData(const int _maxKey, const double _alpha) {
maxKey = _maxKey;
// Compute normalization constant c for implied key range: [1, maxKey]
for (int i = 1; i <= _maxKey; i++) {
c += ((double)1) / pow((double) i, _alpha);
}
double* probs = new double[_maxKey+1];
for (int i = 1; i <= _maxKey; i++) {
probs[i] = (((double)1) / pow((double) i, _alpha)) / c;
}
// Random should be seeded already (in main)
        // shuffle the full key range probs[1..maxKey] (end iterator is exclusive)
        std::random_shuffle(probs + 1, probs + maxKey + 1);
sum_probs = new double[_maxKey+1];
sum_probs[0] = 0;
for (int i = 1; i <= _maxKey; i++) {
sum_probs[i] = sum_probs[i - 1] + probs[i];
}
delete[] probs;
}
~KeyGeneratorZipfData() {
delete[] sum_probs;
}
};
template <typename K>
class KeyGeneratorZipf {
private:
PAD;
KeyGeneratorZipfData * data;
Random64 * rng;
PAD;
public:
KeyGeneratorZipf(KeyGeneratorZipfData * _data, Random64 * _rng)
: data(_data), rng(_rng) {}
K next() {
double z; // Uniform random number (0 < z < 1)
        int zipf_value = 0; // Computed Zipf value to be returned
// Pull a uniform random number (0 < z < 1)
do {
z = (rng->next() / (double) std::numeric_limits<uint64_t>::max());
} while ((z == 0) || (z == 1));
zipf_value = std::upper_bound(data->sum_probs + 1, data->sum_probs + data->maxKey + 1, z) - data->sum_probs;
// Assert that zipf_value is between 1 and N
assert((zipf_value >= 1) && (zipf_value <= data->maxKey));
// GSTATS_ADD_IX(tid, key_gen_histogram, 1, zipf_value);
return (zipf_value);
}
};
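
A standalone sketch of the same inverse-CDF technique, substituting std::mt19937_64 for the Random64 type from plaf.h (the substitution, the seed, and the parameters are assumptions of this sketch, not part of the header; the per-key shuffle of probs is also omitted here, so key 1 is always the hottest):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <random>
#include <vector>

// Precompute the Zipf CDF, then binary-search a uniform draw into it,
// mirroring KeyGeneratorZipfData + KeyGeneratorZipf::next() above.
int main() {
    const int maxKey = 1000;
    const double alpha = 1.0;
    double c = 0;
    for (int i = 1; i <= maxKey; i++) c += 1.0 / std::pow((double) i, alpha);
    std::vector<double> sum_probs(maxKey + 1, 0.0);
    for (int i = 1; i <= maxKey; i++) {
        sum_probs[i] = sum_probs[i - 1] + (1.0 / std::pow((double) i, alpha)) / c;
    }
    std::mt19937_64 gen(12345);
    std::uniform_real_distribution<double> uni(0.0, 1.0);
    for (int s = 0; s < 5; s++) {
        double z;
        do { z = uni(gen); } while (z == 0.0);  // keep 0 < z < 1
        int key = (int) (std::upper_bound(sum_probs.begin() + 1, sum_probs.end(), z)
                         - sum_probs.begin());
        if (key > maxKey) key = maxKey;  // guard against rounding at the CDF top
        printf("sample %d: key = %d\n", s, key);
    }
    return 0;
}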
// class KeyGeneratorZipfData {
// public:
// PAD;
// int maxKey;
// double theta;
// double c = 0; // Normalization constant
// double * sum_probs; // Pre-calculated sum of probabilities
// PAD;
// KeyGeneratorZipfData(int _maxKey, double _alpha) {
// // std::cout<<"start KeyGeneratorZipfData"<<std::endl;
// maxKey = _maxKey;
// theta = _alpha;
// // Compute normalization constant c for implied key range: [1, maxKey]
// int i;
// for (i = 1; i <= _maxKey; i++)
// c = c + (1.0 / pow((double) i, theta));
// c = 1.0 / c;
// sum_probs = new double[_maxKey+1];
// sum_probs[0] = 0;
// for (i = 1; i <= _maxKey; i++) {
// sum_probs[i] = sum_probs[i - 1] + c / pow((double) i, theta);
// }
// // std::cout<<"end KeyGeneratorZipfData"<<std::endl;
// }
// ~KeyGeneratorZipfData() {
// delete[] sum_probs;
// }
// };
// template <typename K>
// class KeyGeneratorZipf {
// private:
// PAD;
// KeyGeneratorZipfData * data;
// Random64 * rng;
// PAD;
// public:
// KeyGeneratorZipf(KeyGeneratorZipfData * _data, Random64 * _rng)
// : data(_data), rng(_rng) {}
// K next() {
// double z; // Uniform random number (0 < z < 1)
// int zipf_value = 0; // Computed exponential value to be returned
// int low, high, mid; // Binary-search bounds
// // Pull a uniform random number (0 < z < 1)
// do {
// z = (rng->next() / (double) std::numeric_limits<uint64_t>::max());
// // printf(" z=%lf\n", z);
// } while ((z == 0) || (z == 1));
// // Map z to the value
// low = 1, high = data->maxKey;
// do {
// mid = floor((low + high) / 2);
// if (data->sum_probs[mid] >= z && data->sum_probs[mid - 1] < z) {
// zipf_value = mid;
// break;
// } else if (data->sum_probs[mid] >= z) {
// high = mid - 1;
// } else {
// low = mid + 1;
// }
// } while (low <= high);
// // Assert that zipf_value is between 1 and N
// assert((zipf_value >= 1) && (zipf_value <= data->maxKey));
// GSTATS_ADD_IX(tid, key_gen_histogram, 1, zipf_value);
// return (zipf_value);
// }
// };
// Sampler taken from https://commons.apache.org/proper/commons-math/apidocs/src-html/org/apache/commons/math4/distribution/ZipfDistribution.html#line.44
// Paper: Rejection-Inversion to Generate Variates from Monotone Discrete Distributions.
struct ZipfRejectionInversionSamplerData {
int* mapping;
const int maxkey;
ZipfRejectionInversionSamplerData(int _maxkey): maxkey(_maxkey) {
mapping = new int[maxkey + 1];
#pragma omp parallel for
for (int i = 0; i < maxkey + 1; ++i) {
mapping[i] = i;
}
        std::random_shuffle(mapping + 1, mapping + maxkey + 1); // shuffle mapping[1..maxkey]
}
~ZipfRejectionInversionSamplerData() {
delete[] mapping;
}
};
class ZipfRejectionInversionSampler {
const double exponent;
const int maxkey;
Random64* rng;
ZipfRejectionInversionSamplerData* const data;
double hIntegralX1;
double hIntegralmaxkey;
double s;
double hIntegral(const double x) {
return helper2((1 - exponent) * log(x)) * log(x);
}
double h(const double x) {
return exp(-exponent * log(x));
}
double hIntegralInverse(const double x) {
double t = x * (1 - exponent);
if (t < -1) {
// Limit value to the range [-1, +inf).
// t could be smaller than -1 in some rare cases due to numerical errors.
t = -1;
}
return exp(helper1(t) * x);
}
double helper1(const double x) {
// if (abs(x)>1e-8) {
return log(x + 1)/x;
// }
// else {
// return 1.-x*((1./2.)-x*((1./3.)-x*(1./4.)));
// }
}
double helper2(const double x) {
// if (FastMath.abs(x)>1e-8) {
return (exp(x) - 1)/x;
// }
// else {
// return 1.+x*(1./2.)*(1.+x*(1./3.)*(1.+x*(1./4.)));
// }
}
public:
/** Simple constructor.
* @param maxkey number of elements
* @param exponent exponent parameter of the distribution
*/
    ZipfRejectionInversionSampler(ZipfRejectionInversionSamplerData* const _data, const double _exponent, Random64 * _rng): exponent(_exponent), maxkey(_data->maxkey), rng(_rng), data(_data) {
if (exponent <= 1) {
std::cout << "-dist-zipf-fast only works with exponents greater than 1." << std::endl;
exit(-1);
}
hIntegralX1 = hIntegral(1.5) - 1;
hIntegralmaxkey = hIntegral(maxkey + 0.5);
s = 2 - hIntegralInverse(hIntegral(2.5) - h(2));
}
/** Generate one integral number in the range [1, maxkey].
* @param random random generator to use
* @return generated integral number in the range [1, maxkey]
*/
int next() {
while(true) {
// Pull a uniform random number (0 < z < 1)
const double z = (rng->next() / (double) std::numeric_limits<uint64_t>::max());
const double u = hIntegralmaxkey + z * (hIntegralX1 - hIntegralmaxkey);
// u is uniformly distributed in (hIntegralX1, hIntegralmaxkey]
double x = hIntegralInverse(u);
int k = (int)(x + 0.5);
if (k < 1) {
k = 1;
}
else if (k > maxkey) {
k = maxkey;
}
if (k - x <= s || u >= hIntegral(k + 0.5) - h(k)) {
return data->mapping[k];
}
}
}
};
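
A standalone round-trip check of the hIntegral / hIntegralInverse pair above (illustrative; exponent fixed at 1.5): analytically hIntegral(x) = (x^(1-e) - 1)/(1-e), so the inverse should recover x up to rounding.

#include <cmath>
#include <cstdio>

static double helper1(const double x) { return log(x + 1) / x; }
static double helper2(const double x) { return (exp(x) - 1) / x; }
static double hIntegral(const double x, const double e) {
    return helper2((1 - e) * log(x)) * log(x);
}
static double hIntegralInverse(const double x, const double e) {
    double t = x * (1 - e);
    if (t < -1) t = -1;             // clamp, as in the sampler above
    return exp(helper1(t) * x);
}
int main() {
    const double e = 1.5;
    for (double x = 2; x <= 32; x *= 2) {
        // each line should print a value very close to x itself
        printf("x = %5.1f  round-trip = %8.4f\n", x,
               hIntegralInverse(hIntegral(x, e), e));
    }
    return 0;
}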
#endif /* KEYGEN_H */
data.c
#include "data.h"
#include "utils.h"
#include "image.h"
#include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
list *get_paths(char *filename)
{
char *path;
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
list *lines = make_list();
while((path=fgetl(file))){
list_insert(lines, path);
}
fclose(file);
return lines;
}
/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
int index = rand()%m;
indexes[i] = index;
random_paths[i] = paths[index];
if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
*/
char **get_random_paths(char **paths, int n, int m)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
int index = rand()%m;
random_paths[i] = paths[index];
//if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
char **replace_paths = calloc(n, sizeof(char*));
int i;
for(i = 0; i < n; ++i){
char replaced[4096];
find_replace(paths[i], find, replace, replaced);
replace_paths[i] = copy_string(replaced);
}
return replace_paths;
}
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
int i;
matrix X;
X.rows = n;
X.vals = calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
image im = load_image(paths[i], w, h, 3);
image gray = grayscale_image(im);
free_image(im);
im = gray;
X.vals[i] = im.data;
X.cols = im.h*im.w*im.c;
}
return X;
}
matrix load_image_paths(char **paths, int n, int w, int h)
{
int i;
matrix X;
X.rows = n;
X.vals = calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
image im = load_image_color(paths[i], w, h);
X.vals[i] = im.data;
X.cols = im.h*im.w*im.c;
}
return X;
}
matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
int i;
matrix X;
X.rows = n;
X.vals = calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
image im = load_image_color(paths[i], 0, 0);
image crop;
if(center){
crop = center_crop_image(im, size, size);
} else {
crop = random_augment_image(im, angle, aspect, min, max, size, size);
}
int flip = rand()%2;
if (flip) flip_image(crop);
random_distort_image(crop, hue, saturation, exposure);
/*
show_image(im, "orig");
show_image(crop, "crop");
cvWaitKey(0);
*/
free_image(im);
X.vals[i] = crop.data;
X.cols = crop.h*crop.w*crop.c;
}
return X;
}
box_label *read_boxes(char *filename, int *n)
{
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
float x, y, h, w;
int id;
int count = 0;
int size = 64;
box_label *boxes = calloc(size, sizeof(box_label));
while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
if(count == size) {
size = size * 2;
boxes = realloc(boxes, size*sizeof(box_label));
}
boxes[count].id = id;
boxes[count].x = x;
boxes[count].y = y;
boxes[count].h = h;
boxes[count].w = w;
boxes[count].left = x - w/2;
boxes[count].right = x + w/2;
boxes[count].top = y - h/2;
boxes[count].bottom = y + h/2;
++count;
}
fclose(file);
*n = count;
return boxes;
}
void randomize_boxes(box_label *b, int n)
{
int i;
for(i = 0; i < n; ++i){
box_label swap = b[i];
int index = rand()%n;
b[i] = b[index];
b[index] = swap;
}
}
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
int i;
for(i = 0; i < n; ++i){
if(boxes[i].x == 0 && boxes[i].y == 0) {
boxes[i].x = 999999;
boxes[i].y = 999999;
boxes[i].w = 999999;
boxes[i].h = 999999;
continue;
}
boxes[i].left = boxes[i].left * sx - dx;
boxes[i].right = boxes[i].right * sx - dx;
boxes[i].top = boxes[i].top * sy - dy;
boxes[i].bottom = boxes[i].bottom* sy - dy;
if(flip){
float swap = boxes[i].left;
boxes[i].left = 1. - boxes[i].right;
boxes[i].right = 1. - swap;
}
boxes[i].left = constrain(0, 1, boxes[i].left);
boxes[i].right = constrain(0, 1, boxes[i].right);
boxes[i].top = constrain(0, 1, boxes[i].top);
boxes[i].bottom = constrain(0, 1, boxes[i].bottom);
boxes[i].x = (boxes[i].left+boxes[i].right)/2;
boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
boxes[i].w = (boxes[i].right - boxes[i].left);
boxes[i].h = (boxes[i].bottom - boxes[i].top);
boxes[i].w = constrain(0, 1, boxes[i].w);
boxes[i].h = constrain(0, 1, boxes[i].h);
}
}
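/* Worked example (illustrative): if a crop doubles the width (sx = 2) and
   shifts it by dx = 0.1, a box with left = 0.2, right = 0.4 maps to
   left = 0.2*2 - 0.1 = 0.3 and right = 0.4*2 - 0.1 = 0.7; x, y, w, h are
   then rebuilt from the clamped edges. */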
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
float x,y,w,h;
int id;
int i;
for (i = 0; i < count && i < 30; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
if (w < .0 || h < .0) continue;
int index = (4+classes) * i;
truth[index++] = x;
truth[index++] = y;
truth[index++] = w;
truth[index++] = h;
if (id < classes) truth[index+id] = 1;
}
free(boxes);
}
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".png", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
float x,y,w,h;
int id;
int i;
for (i = 0; i < count; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
if (w < .005 || h < .005) continue;
int col = (int)(x*num_boxes);
int row = (int)(y*num_boxes);
x = x*num_boxes - col;
y = y*num_boxes - row;
int index = (col+row*num_boxes)*(5+classes);
if (truth[index]) continue;
truth[index++] = 1;
if (id < classes) truth[index+id] = 1;
index += classes;
truth[index++] = x;
truth[index++] = y;
truth[index++] = w;
truth[index++] = h;
}
free(boxes);
}
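/* Worked example (illustrative): with num_boxes = 7 and a box at x = 0.37,
   x*num_boxes = 2.59 places the box in grid column 2 with in-cell offset
   0.59; each grid cell owns one (5+classes)-wide slot of truth. */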
void load_rle(image im, int *rle, int n)
{
int count = 0;
int curr = 0;
int i,j;
for(i = 0; i < n; ++i){
for(j = 0; j < rle[i]; ++j){
im.data[count++] = curr;
}
curr = 1 - curr;
}
for(; count < im.h*im.w*im.c; ++count){
im.data[count] = curr;
}
}
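/* Worked example (illustrative): rle = {3,2,4} decodes to the pixel run
   0 0 0 1 1 0 0 0 0 (runs alternate starting at 0); any remaining pixels
   are then filled with the next value in the alternation (1 in this case). */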
void or_image(image src, image dest, int c)
{
int i;
for(i = 0; i < src.w*src.h; ++i){
if(src.data[i]) dest.data[dest.w*dest.h*c + i] = 1;
}
}
void exclusive_image(image src)
{
int k, j, i;
int s = src.w*src.h;
for(k = 0; k < src.c-1; ++k){
for(i = 0; i < s; ++i){
if (src.data[k*s + i]){
for(j = k+1; j < src.c; ++j){
src.data[j*s + i] = 0;
}
}
}
}
}
box bound_image(image im)
{
int x,y;
int minx = im.w;
int miny = im.h;
int maxx = 0;
int maxy = 0;
for(y = 0; y < im.h; ++y){
for(x = 0; x < im.w; ++x){
if(im.data[y*im.w + x]){
minx = (x < minx) ? x : minx;
miny = (y < miny) ? y : miny;
maxx = (x > maxx) ? x : maxx;
maxy = (y > maxy) ? y : maxy;
}
}
}
box b = {minx, miny, maxx-minx + 1, maxy-miny + 1};
//printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
return b;
}
void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
char labelpath[4096];
find_replace(path, "images", "mask", labelpath);
find_replace(labelpath, "JPEGImages", "mask", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
FILE *file = fopen(labelpath, "r");
if(!file) file_error(labelpath);
char buff[32788];
int id;
int i = 0;
image part = make_image(w, h, 1);
while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
if(flip) flip_image(sized);
box b = bound_image(sized);
if(b.w > 0){
image crop = crop_image(sized, b.x, b.y, b.w, b.h);
image mask = resize_image(crop, mw, mh);
truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w;
truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h;
truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w;
truth[i*(4 + mw*mh + 1) + 3] = b.h/sized.h;
int j;
for(j = 0; j < mw*mh; ++j){
truth[i*(4 + mw*mh + 1) + 4 + j] = mask.data[j];
}
truth[i*(4 + mw*mh + 1) + 4 + mw*mh] = id;
free_image(crop);
free_image(mask);
++i;
}
free_image(sized);
free(rle);
}
fclose(file);
free_image(part);
}
void fill_truth_detection(char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, "raw", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".png", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
if(count > num_boxes) count = num_boxes;
float x,y,w,h;
int id;
int i;
for (i = 0; i < count; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
if ((w < .001 || h < .001)) continue;
truth[i*5+0] = x;
truth[i*5+1] = y;
truth[i*5+2] = w;
truth[i*5+3] = h;
truth[i*5+4] = id;
}
free(boxes);
}
#define NUMCHARS 37
void print_letters(float *pred, int n)
{
int i;
for(i = 0; i < n; ++i){
int index = max_index(pred+i*NUMCHARS, NUMCHARS);
printf("%c", int_to_alphanum(index));
}
printf("\n");
}
void fill_truth_captcha(char *path, int n, float *truth)
{
char *begin = strrchr(path, '/');
++begin;
int i;
for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){
int index = alphanum_to_int(begin[i]);
if(index > 35) printf("Bad %c\n", begin[i]);
truth[i*NUMCHARS+index] = 1;
}
for(;i < n; ++i){
truth[i*NUMCHARS + NUMCHARS-1] = 1;
}
}
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.y = make_matrix(n, k*NUMCHARS);
int i;
for(i = 0; i < n; ++i){
fill_truth_captcha(paths[i], k, d.y.vals[i]);
}
if(m) free(paths);
return d;
}
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.X.cols = 17100;
d.y = d.X;
if(m) free(paths);
return d;
}
void fill_truth(char *path, char **labels, int k, float *truth)
{
int i;
memset(truth, 0, k*sizeof(float));
int count = 0;
for(i = 0; i < k; ++i){
if(strstr(path, labels[i])){
truth[i] = 1;
++count;
}
}
if(count != 1 && (k != 1 || count != 0)) printf("Too many or too few labels: %d, %s\n", count, path);
}
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
int j;
for(j = 0; j < k; ++j){
if(truth[j]){
int parent = hierarchy->parent[j];
while(parent >= 0){
truth[parent] = 1;
parent = hierarchy->parent[parent];
}
}
}
int i;
int count = 0;
for(j = 0; j < hierarchy->groups; ++j){
//printf("%d\n", count);
int mask = 1;
for(i = 0; i < hierarchy->group_size[j]; ++i){
if(truth[count + i]){
mask = 0;
break;
}
}
if (mask) {
for(i = 0; i < hierarchy->group_size[j]; ++i){
truth[count + i] = SECRET_NUM;
}
}
count += hierarchy->group_size[j];
}
}
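/* Note (descriptive): the first loop sets every ancestor of a positive
   label to 1; the second fills any group containing no positive entry
   with the SECRET_NUM sentinel. */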
matrix load_regression_labels_paths(char **paths, int n)
{
matrix y = make_matrix(n, 1);
int i;
for(i = 0; i < n; ++i){
char labelpath[4096];
find_replace(paths[i], "images", "targets", labelpath);
find_replace(labelpath, "JPEGImages", "targets", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".png", ".txt", labelpath);
FILE *file = fopen(labelpath, "r");
fscanf(file, "%f", &(y.vals[i][0]));
fclose(file);
}
return y;
}
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
{
matrix y = make_matrix(n, k);
int i;
for(i = 0; i < n && labels; ++i){
fill_truth(paths[i], labels, k, y.vals[i]);
if(hierarchy){
fill_hierarchy(y.vals[i], k, hierarchy);
}
}
return y;
}
matrix load_tags_paths(char **paths, int n, int k)
{
matrix y = make_matrix(n, k);
int i;
int count = 0;
for(i = 0; i < n; ++i){
char label[4096];
find_replace(paths[i], "imgs", "labels", label);
find_replace(label, "_iconl.jpeg", ".txt", label);
FILE *file = fopen(label, "r");
if(!file){
find_replace(label, "labels", "labels2", label);
file = fopen(label, "r");
if(!file) continue;
}
++count;
int tag;
while(fscanf(file, "%d", &tag) == 1){
if(tag < k){
y.vals[i][tag] = 1;
}
}
fclose(file);
}
printf("%d/%d\n", count, n);
return y;
}
char **get_labels(char *filename)
{
list *plist = get_paths(filename);
char **labels = (char **)list_to_array(plist);
free_list(plist);
return labels;
}
void free_data(data d)
{
if(!d.shallow){
free_matrix(d.X);
free_matrix(d.y);
}else{
free(d.X.vals);
free(d.y.vals);
}
}
image get_segmentation_image(char *path, int w, int h, int classes)
{
char labelpath[4096];
find_replace(path, "images", "mask", labelpath);
find_replace(labelpath, "JPEGImages", "mask", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
image mask = make_image(w, h, classes);
FILE *file = fopen(labelpath, "r");
if(!file) file_error(labelpath);
char buff[32788];
int id;
image part = make_image(w, h, 1);
while(fscanf(file, "%d %s", &id, buff) == 2){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
or_image(part, mask, id);
free(rle);
}
//exclusive_image(mask);
fclose(file);
free_image(part);
return mask;
}
image get_segmentation_image2(char *path, int w, int h, int classes)
{
char labelpath[4096];
find_replace(path, "images", "mask", labelpath);
find_replace(labelpath, "JPEGImages", "mask", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
image mask = make_image(w, h, classes+1);
int i;
for(i = 0; i < w*h; ++i){
mask.data[w*h*classes + i] = 1;
}
FILE *file = fopen(labelpath, "r");
if(!file) file_error(labelpath);
char buff[32788];
int id;
image part = make_image(w, h, 1);
while(fscanf(file, "%d %s", &id, buff) == 2){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
or_image(part, mask, id);
for(i = 0; i < w*h; ++i){
if(part.data[i]) mask.data[w*h*classes + i] = 0;
}
free(rle);
}
//exclusive_image(mask);
fclose(file);
free_image(part);
return mask;
}
data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
d.y.rows = n;
d.y.cols = h*w*classes/div/div;
d.y.vals = calloc(d.X.rows, sizeof(float*));
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
int flip = rand()%2;
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes);
//image mask = make_image(orig.w, orig.h, classes+1);
image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect);
if(flip) flip_image(sized_m);
d.y.vals[i] = sized_m.data;
free_image(orig);
free_image(mask);
/*
image rgb = mask_to_rgb(sized_m, classes);
show_image(rgb, "part");
show_image(sized, "orig");
cvWaitKey(0);
free_image(rgb);
*/
}
free(random_paths);
return d;
}
data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
d.y = make_matrix(n, (coords+1)*boxes);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
int flip = rand()%2;
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
//show_image(sized, "image");
fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14);
free_image(orig);
/*
image rgb = mask_to_rgb(sized_m, classes);
show_image(rgb, "part");
show_image(sized, "orig");
cvWaitKey(0);
free_image(rgb);
*/
}
free(random_paths);
return d;
}
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
int k = size*size*(5+classes);
d.y = make_matrix(n, k);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
int oh = orig.h;
int ow = orig.w;
int dw = (ow*jitter);
int dh = (oh*jitter);
int pleft = rand_uniform(-dw, dw);
int pright = rand_uniform(-dw, dw);
int ptop = rand_uniform(-dh, dh);
int pbot = rand_uniform(-dh, dh);
int swidth = ow - pleft - pright;
int sheight = oh - ptop - pbot;
float sx = (float)swidth / ow;
float sy = (float)sheight / oh;
int flip = rand()%2;
image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
float dx = ((float)pleft/ow)/sx;
float dy = ((float)ptop /oh)/sy;
image sized = resize_image(cropped, w, h);
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);
free_image(orig);
free_image(cropped);
}
free(random_paths);
return d;
}
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
if(m) paths = get_random_paths(paths, 2*n, m);
int i,j;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*6;
int k = 2*(classes);
d.y = make_matrix(n, k);
for(i = 0; i < n; ++i){
image im1 = load_image_color(paths[i*2], w, h);
image im2 = load_image_color(paths[i*2+1], w, h);
d.X.vals[i] = calloc(d.X.cols, sizeof(float));
memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float));
memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));
int id;
float iou;
char imlabel1[4096];
char imlabel2[4096];
find_replace(paths[i*2], "imgs", "labels", imlabel1);
find_replace(imlabel1, "jpg", "txt", imlabel1);
FILE *fp1 = fopen(imlabel1, "r");
while(fscanf(fp1, "%d %f", &id, &iou) == 2){
if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
}
find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
find_replace(imlabel2, "jpg", "txt", imlabel2);
FILE *fp2 = fopen(imlabel2, "r");
while(fscanf(fp2, "%d %f", &id, &iou) == 2){
if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
}
for (j = 0; j < classes; ++j){
if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){
d.y.vals[i][2*j] = 1;
d.y.vals[i][2*j+1] = 0;
} else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
d.y.vals[i][2*j] = 0;
d.y.vals[i][2*j+1] = 1;
} else {
d.y.vals[i][2*j] = SECRET_NUM;
d.y.vals[i][2*j+1] = SECRET_NUM;
}
}
fclose(fp1);
fclose(fp2);
free_image(im1);
free_image(im2);
}
if(m) free(paths);
return d;
}
data load_data_swag(char **paths, int n, int classes, float jitter)
{
int index = rand()%n;
char *random_path = paths[index];
image orig = load_image_color(random_path, 0, 0);
int h = orig.h;
int w = orig.w;
data d = {0};
d.shallow = 0;
d.w = w;
d.h = h;
d.X.rows = 1;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
int k = (4+classes)*30;
d.y = make_matrix(1, k);
int dw = w*jitter;
int dh = h*jitter;
int pleft = rand_uniform(-dw, dw);
int pright = rand_uniform(-dw, dw);
int ptop = rand_uniform(-dh, dh);
int pbot = rand_uniform(-dh, dh);
int swidth = w - pleft - pright;
int sheight = h - ptop - pbot;
float sx = (float)swidth / w;
float sy = (float)sheight / h;
int flip = rand()%2;
image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
float dx = ((float)pleft/w)/sx;
float dy = ((float)ptop /h)/sy;
image sized = resize_image(cropped, w, h);
if(flip) flip_image(sized);
d.X.vals[0] = sized.data;
fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);
free_image(orig);
free_image(cropped);
return d;
}
data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
d.y = make_matrix(n, 5*boxes);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
image sized = make_image(w, h, orig.c);
fill_image(sized, .5);
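        // jitter aspect ratio and scale, then place the resized original at a
        // random offset inside the gray (0.5) canvas created above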
float dw = jitter * orig.w;
float dh = jitter * orig.h;
float new_ar = (orig.w + rand_uniform(-dw, dw)) / (orig.h + rand_uniform(-dh, dh));
float scale = rand_uniform(.25, 2);
float nw, nh;
if(new_ar < 1){
nh = scale * h;
nw = nh * new_ar;
} else {
nw = scale * w;
nh = nw / new_ar;
}
float dx = rand_uniform(0, w - nw);
float dy = rand_uniform(0, h - nh);
place_image(orig, nw, nh, dx, dy, sized);
random_distort_image(sized, hue, saturation, exposure);
int flip = rand()%2;
if(flip) flip_image(sized);
d.X.vals[i] = sized.data;
fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, -dx/w, -dy/h, nw/w, nh/h);
free_image(orig);
}
free(random_paths);
return d;
}
void *load_thread(void *ptr)
{
//printf("Loading data: %d\n", rand());
load_args a = *(struct load_args*)ptr;
if(a.exposure == 0) a.exposure = 1;
if(a.saturation == 0) a.saturation = 1;
if(a.aspect == 0) a.aspect = 1;
if (a.type == OLD_CLASSIFICATION_DATA){
*a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
} else if (a.type == REGRESSION_DATA){
*a.d = load_data_regression(a.paths, a.n, a.m, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
} else if (a.type == CLASSIFICATION_DATA){
*a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center);
} else if (a.type == SUPER_DATA){
*a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
} else if (a.type == WRITING_DATA){
*a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
} else if (a.type == INSTANCE_DATA){
*a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
} else if (a.type == SEGMENTATION_DATA){
*a.d = load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale);
} else if (a.type == REGION_DATA){
*a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
} else if (a.type == DETECTION_DATA){
*a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
} else if (a.type == SWAG_DATA){
*a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
} else if (a.type == COMPARE_DATA){
*a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
} else if (a.type == IMAGE_DATA){
*(a.im) = load_image_color(a.path, 0, 0);
*(a.resized) = resize_image(*(a.im), a.w, a.h);
} else if (a.type == LETTERBOX_DATA){
*(a.im) = load_image_color(a.path, 0, 0);
*(a.resized) = letterbox_image(*(a.im), a.w, a.h);
} else if (a.type == TAG_DATA){
*a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
}
free(ptr);
return 0;
}
pthread_t load_data_in_thread(load_args args)
{
pthread_t thread;
struct load_args *ptr = calloc(1, sizeof(struct load_args));
*ptr = args;
if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed");
return thread;
}
void *load_threads(void *ptr)
{
int i;
load_args args = *(load_args *)ptr;
if (args.threads == 0) args.threads = 1;
data *out = args.d;
int total = args.n;
free(ptr);
data *buffers = calloc(args.threads, sizeof(data));
pthread_t *threads = calloc(args.threads, sizeof(pthread_t));
for(i = 0; i < args.threads; ++i){
args.d = buffers + i;
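        // balanced integer split: e.g. total = 10 over 4 threads gives 2,3,2,3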
args.n = (i+1) * total/args.threads - i * total/args.threads;
threads[i] = load_data_in_thread(args);
}
for(i = 0; i < args.threads; ++i){
pthread_join(threads[i], 0);
}
*out = concat_datas(buffers, args.threads);
out->shallow = 0;
for(i = 0; i < args.threads; ++i){
buffers[i].shallow = 1;
free_data(buffers[i]);
}
free(buffers);
free(threads);
return 0;
}
void load_data_blocking(load_args args)
{
struct load_args *ptr = calloc(1, sizeof(struct load_args));
*ptr = args;
load_thread(ptr);
}
pthread_t load_data(load_args args)
{
pthread_t thread;
struct load_args *ptr = calloc(1, sizeof(struct load_args));
*ptr = args;
if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed");
return thread;
}
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
if(m) paths = get_random_paths(paths, n, m);
char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
if(m) free(paths);
int i;
for(i = 0; i < n; ++i) free(replace_paths[i]);
free(replace_paths);
return d;
}
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.y = load_labels_paths(paths, n, labels, k, 0);
if(m) free(paths);
return d;
}
/*
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
data d = {0};
d.indexes = calloc(n, sizeof(int));
if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure);
d.y = load_labels_paths(paths, n, labels, k);
if(m) free(paths);
return d;
}
*/
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
int i;
d.X.rows = n;
d.X.vals = calloc(n, sizeof(float*));
d.X.cols = w*h*3;
d.y.rows = n;
d.y.vals = calloc(n, sizeof(float*));
d.y.cols = w*scale * h*scale * 3;
for(i = 0; i < n; ++i){
image im = load_image_color(paths[i], 0, 0);
image crop = random_crop_image(im, w*scale, h*scale);
int flip = rand()%2;
if (flip) flip_image(crop);
image resize = resize_image(crop, w, h);
d.X.vals[i] = resize.data;
d.y.vals[i] = crop.data;
free_image(im);
}
if(m) free(paths);
return d;
}
data load_data_regression(char **paths, int n, int m, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
d.y = load_regression_labels_paths(paths, n);
if(m) free(paths);
return d;
}
data select_data(data *orig, int *inds)
{
data d = {0};
d.shallow = 1;
d.w = orig[0].w;
d.h = orig[0].h;
d.X.rows = orig[0].X.rows;
d.y.rows = orig[0].X.rows;
d.X.cols = orig[0].X.cols;
d.y.cols = orig[0].y.cols;
d.X.vals = calloc(orig[0].X.rows, sizeof(float *));
d.y.vals = calloc(orig[0].y.rows, sizeof(float *));
int i;
for(i = 0; i < d.X.rows; ++i){
d.X.vals[i] = orig[inds[i]].X.vals[i];
d.y.vals[i] = orig[inds[i]].y.vals[i];
}
return d;
}
data *tile_data(data orig, int divs, int size)
{
data *ds = calloc(divs*divs, sizeof(data));
int i, j;
#pragma omp parallel for
for(i = 0; i < divs*divs; ++i){
data d;
d.shallow = 0;
d.w = orig.w/divs * size;
d.h = orig.h/divs * size;
d.X.rows = orig.X.rows;
d.X.cols = d.w*d.h*3;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.y = copy_matrix(orig.y);
#pragma omp parallel for
for(j = 0; j < orig.X.rows; ++j){
int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2;
int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2;
image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]);
d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data;
}
ds[i] = d;
}
return ds;
}
data resize_data(data orig, int w, int h)
{
data d = {0};
d.shallow = 0;
d.w = w;
d.h = h;
int i;
d.X.rows = orig.X.rows;
d.X.cols = w*h*3;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.y = copy_matrix(orig.y);
#pragma omp parallel for
for(i = 0; i < orig.X.rows; ++i){
image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]);
d.X.vals[i] = resize_image(im, w, h).data;
}
return d;
}
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.w=size;
d.h=size;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, center);
d.y = load_labels_paths(paths, n, labels, k, hierarchy);
if(m) free(paths);
return d;
}
data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.w = size;
d.h = size;
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
d.y = load_tags_paths(paths, n, k);
if(m) free(paths);
return d;
}
matrix concat_matrix(matrix m1, matrix m2)
{
int i, count = 0;
matrix m;
m.cols = m1.cols;
m.rows = m1.rows+m2.rows;
m.vals = calloc(m1.rows + m2.rows, sizeof(float*));
for(i = 0; i < m1.rows; ++i){
m.vals[count++] = m1.vals[i];
}
for(i = 0; i < m2.rows; ++i){
m.vals[count++] = m2.vals[i];
}
return m;
}
data concat_data(data d1, data d2)
{
data d = {0};
d.shallow = 1;
d.X = concat_matrix(d1.X, d2.X);
d.y = concat_matrix(d1.y, d2.y);
d.w = d1.w;
d.h = d1.h;
return d;
}
data concat_datas(data *d, int n)
{
int i;
data out = {0};
for(i = 0; i < n; ++i){
data new = concat_data(d[i], out);
free_data(out);
out = new;
}
return out;
}
data load_categorical_data_csv(char *filename, int target, int k)
{
data d = {0};
d.shallow = 0;
matrix X = csv_to_matrix(filename);
float *truth_1d = pop_column(&X, target);
float **truth = one_hot_encode(truth_1d, X.rows, k);
matrix y;
y.rows = X.rows;
y.cols = k;
y.vals = truth;
d.X = X;
d.y = y;
free(truth_1d);
return d;
}
data load_cifar10_data(char *filename)
{
data d = {0};
d.shallow = 0;
long i,j;
matrix X = make_matrix(10000, 3072);
matrix y = make_matrix(10000, 10);
d.X = X;
d.y = y;
FILE *fp = fopen(filename, "rb");
if(!fp) file_error(filename);
for(i = 0; i < 10000; ++i){
unsigned char bytes[3073];
fread(bytes, 1, 3073, fp);
int class = bytes[0];
y.vals[i][class] = 1;
for(j = 0; j < X.cols; ++j){
X.vals[i][j] = (double)bytes[j+1];
}
}
scale_data_rows(d, 1./255);
//normalize_data_rows(d);
fclose(fp);
return d;
}
void get_random_batch(data d, int n, float *X, float *y)
{
int j;
for(j = 0; j < n; ++j){
int index = rand()%d.X.rows;
memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float));
memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float));
}
}
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
int j;
for(j = 0; j < n; ++j){
int index = offset + j;
memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float));
if(y) memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float));
}
}
void smooth_data(data d)
{
int i, j;
float scale = 1. / d.y.cols;
float eps = .1;
for(i = 0; i < d.y.rows; ++i){
for(j = 0; j < d.y.cols; ++j){
d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j];
}
}
}
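/* Worked example (illustrative): with 10 classes and eps = .1, a one-hot
   row becomes .1*.1 + .9*1 = .91 for the true class and .01 elsewhere. */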
data load_all_cifar10()
{
data d = {0};
d.shallow = 0;
int i,j,b;
matrix X = make_matrix(50000, 3072);
matrix y = make_matrix(50000, 10);
d.X = X;
d.y = y;
for(b = 0; b < 5; ++b){
char buff[256];
sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
FILE *fp = fopen(buff, "rb");
if(!fp) file_error(buff);
for(i = 0; i < 10000; ++i){
unsigned char bytes[3073];
fread(bytes, 1, 3073, fp);
int class = bytes[0];
y.vals[i+b*10000][class] = 1;
for(j = 0; j < X.cols; ++j){
X.vals[i+b*10000][j] = (double)bytes[j+1];
}
}
fclose(fp);
}
//normalize_data_rows(d);
scale_data_rows(d, 1./255);
smooth_data(d);
return d;
}
data load_go(char *filename)
{
FILE *fp = fopen(filename, "rb");
matrix X = make_matrix(3363059, 361);
matrix y = make_matrix(3363059, 361);
int row, col;
if(!fp) file_error(filename);
char *label;
int count = 0;
while((label = fgetl(fp))){
int i;
if(count == X.rows){
X = resize_matrix(X, count*2);
y = resize_matrix(y, count*2);
}
sscanf(label, "%d %d", &row, &col);
char *board = fgetl(fp);
int index = row*19 + col;
y.vals[count][index] = 1;
for(i = 0; i < 19*19; ++i){
float val = 0;
if(board[i] == '1') val = 1;
else if(board[i] == '2') val = -1;
X.vals[count][i] = val;
}
++count;
free(label);
free(board);
}
X = resize_matrix(X, count);
y = resize_matrix(y, count);
data d = {0};
d.shallow = 0;
d.X = X;
d.y = y;
fclose(fp);
return d;
}
void randomize_data(data d)
{
int i;
for(i = d.X.rows-1; i > 0; --i){
int index = rand()%(i+1); // inclusive bound gives an unbiased Fisher-Yates shuffle
float *swap = d.X.vals[index];
d.X.vals[index] = d.X.vals[i];
d.X.vals[i] = swap;
swap = d.y.vals[index];
d.y.vals[index] = d.y.vals[i];
d.y.vals[i] = swap;
}
}
void scale_data_rows(data d, float s)
{
int i;
for(i = 0; i < d.X.rows; ++i){
scale_array(d.X.vals[i], d.X.cols, s);
}
}
void translate_data_rows(data d, float s)
{
int i;
for(i = 0; i < d.X.rows; ++i){
translate_array(d.X.vals[i], d.X.cols, s);
}
}
data copy_data(data d)
{
data c = {0};
c.w = d.w;
c.h = d.h;
c.shallow = 0;
c.num_boxes = d.num_boxes;
c.boxes = d.boxes;
c.X = copy_matrix(d.X);
c.y = copy_matrix(d.y);
return c;
}
void normalize_data_rows(data d)
{
int i;
for(i = 0; i < d.X.rows; ++i){
normalize_array(d.X.vals[i], d.X.cols);
}
}
data get_data_part(data d, int part, int total)
{
data p = {0};
p.shallow = 1;
p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total;
p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total;
p.X.cols = d.X.cols;
p.y.cols = d.y.cols;
p.X.vals = d.X.vals + d.X.rows * part / total;
p.y.vals = d.y.vals + d.y.rows * part / total;
return p;
}
data get_random_data(data d, int num)
{
data r = {0};
r.shallow = 1;
r.X.rows = num;
r.y.rows = num;
r.X.cols = d.X.cols;
r.y.cols = d.y.cols;
r.X.vals = calloc(num, sizeof(float *));
r.y.vals = calloc(num, sizeof(float *));
int i;
for(i = 0; i < num; ++i){
int index = rand()%d.X.rows;
r.X.vals[i] = d.X.vals[index];
r.y.vals[i] = d.y.vals[index];
}
return r;
}
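/* Note: get_data_part() and get_random_data() both return shallow views whose
   vals entries point into the parent's rows, so the parent data must outlive
   the view; the shallow flag tells free_data() not to free the shared rows. */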
data *split_data(data d, int part, int total)
{
data *split = calloc(2, sizeof(data));
int i;
int start = part*d.X.rows/total;
int end = (part+1)*d.X.rows/total;
data train;
data test;
train.shallow = test.shallow = 1;
test.X.rows = test.y.rows = end-start;
train.X.rows = train.y.rows = d.X.rows - (end-start);
train.X.cols = test.X.cols = d.X.cols;
train.y.cols = test.y.cols = d.y.cols;
train.X.vals = calloc(train.X.rows, sizeof(float*));
test.X.vals = calloc(test.X.rows, sizeof(float*));
train.y.vals = calloc(train.y.rows, sizeof(float*));
test.y.vals = calloc(test.y.rows, sizeof(float*));
for(i = 0; i < start; ++i){
train.X.vals[i] = d.X.vals[i];
train.y.vals[i] = d.y.vals[i];
}
for(i = start; i < end; ++i){
test.X.vals[i-start] = d.X.vals[i];
test.y.vals[i-start] = d.y.vals[i];
}
for(i = end; i < d.X.rows; ++i){
train.X.vals[i-(end-start)] = d.X.vals[i];
train.y.vals[i-(end-start)] = d.y.vals[i];
}
split[0] = train;
split[1] = test;
return split;
}
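/* Usage sketch (hedged; fold 0 of a 5-fold split is illustrative):

       data *s = split_data(d, 0, 5);
       data train = s[0], test = s[1];
       // ... train on train, evaluate on test ...
       free_data(train);   // shallow: releases only the pointer arrays
       free_data(test);
       free(s);
*/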
|
Interp1PrimFifthOrderCompactUpwind.c | /*! @file Interp1PrimFifthOrderCompactUpwind.c
* @brief 5th order compact upwind scheme (component-wise application to vectors).
* @author Debojyoti Ghosh
*/
#include <stdio.h>
#include <basic.h>
#include <arrayfunctions.h>
#include <mathfunctions.h>
#include <interpolation.h>
#include <tridiagLU.h>
#include <mpivars.h>
#include <hypar.h>
#ifdef with_omp
#include <omp.h>
#endif
#undef _MINIMUM_GHOSTS_
/*! \def _MINIMUM_GHOSTS_
* Minimum number of ghost points required for this interpolation
* method.
*/
#define _MINIMUM_GHOSTS_ 3
/*! @brief 5th order compact upwind reconstruction (component-wise) on a uniform grid
Computes the interpolated values of the first primitive of a function \f${\bf f}\left({\bf u}\right)\f$
at the interfaces from the cell-centered values of the function using the fifth order compact upwind scheme on a
uniform grid. The first primitive is defined as a function \f${\bf h}\left({\bf u}\right)\f$ that satisfies:
\f{equation}{
{\bf f}\left({\bf u}\left(x\right)\right) = \frac{1}{\Delta x} \int_{x-\Delta x/2}^{x+\Delta x/2} {\bf h}\left({\bf u}\left(\zeta\right)\right)d\zeta,
\f}
where \f$x\f$ is the spatial coordinate along the dimension of the interpolation. This function computes the 5th order compact upwind numerical approximation
\f$\hat{\bf f}_{j+1/2} \approx {\bf h}_{j+1/2}\f$ as:
\f{align}{
\frac{3}{10}\hat{\bf f}_{j-1/2} + \frac{6}{10}\hat{\bf f}_{j+1/2} + \frac{1}{10}\hat{\bf f}_{j+3/2} = \frac{1}{30}{\bf f}_{j-1} + \frac{19}{30}{\bf f}_j + \frac{1}{3}{\bf f}_{j+1}.
\f}
The resulting tridiagonal system is solved using tridiagLU() (see also #TridiagLU, tridiagLU.h).
\b Implementation \b Notes:
+ This method assumes a uniform grid in the spatial dimension corresponding to the interpolation.
+ The method described above corresponds to a left-biased interpolation. The corresponding right-biased
interpolation can be obtained by reflecting the equations about interface j+1/2.
+ The scalar interpolation method is applied to the vector function in a component-wise manner.
+ The function computes the interpolant for the entire grid in one call. It loops over all the grid lines along the interpolation direction
and carries out the 1D interpolation along these grid lines.
+ Location of cell-centers and cell interfaces along the spatial dimension of the interpolation is shown in the following figure:
@image html chap1_1Ddomain.png
@image latex chap1_1Ddomain.eps width=0.9\textwidth
\b Function \b arguments:
Argument | Type | Explanation
--------- | --------- | ---------------------------------------------
fI | double* | Array to hold the computed interpolant at the grid interfaces. This array must have the same layout as the solution, but with \b no \b ghost \b points. Its size should be the same as u in all dimensions, except dir (the dimension along which to interpolate) along which it should be larger by 1 (number of interfaces is 1 more than the number of interior cell centers).
fC | double* | Array with the cell-centered values of the flux function \f${\bf f}\left({\bf u}\right)\f$. This array must have the same layout and size as the solution, \b with \b ghost \b points.
u | double* | The solution array \f${\bf u}\f$ (with ghost points). If the interpolation is characteristic based, this is needed to compute the eigendecomposition. For a multidimensional problem, the layout is as follows: u is a contiguous 1D array of size (nvars*dim[0]*dim[1]*...*dim[D-1]) corresponding to the multi-dimensional solution, with the following ordering - nvars, dim[0], dim[1], ..., dim[D-1], where nvars is the number of solution components (#HyPar::nvars), dim is the local size (#HyPar::dim_local), D is the number of spatial dimensions.
x | double* | The grid array (with ghost points). This is used only by non-uniform-grid interpolation methods. For multidimensional problems, the layout is as follows: x is a contiguous 1D array of size (dim[0]+dim[1]+...+dim[D-1]), with the spatial coordinates along dim[0] stored from 0,...,dim[0]-1, the spatial coordinates along dim[1] stored along dim[0],...,dim[0]+dim[1]-1, and so forth.
upw | int | Upwinding direction: if positive, a left-biased interpolant will be computed; if negative, a right-biased interpolant will be computed. If the interpolation method is central, then this has no effect.
dir | int | Spatial dimension along which to interpolate (eg: 0 for 1D; 0 or 1 for 2D; 0,1 or 2 for 3D)
s | void* | Solver object of type #HyPar: the following variables are needed - #HyPar::ghosts, #HyPar::ndims, #HyPar::nvars, #HyPar::dim_local.
m | void* | MPI object of type #MPIVariables: this is needed only by compact interpolation methods that need to solve a global implicit system across MPI ranks.
uflag | int | A flag indicating if the function being interpolated \f${\bf f}\f$ is the solution itself \f${\bf u}\f$ (if 1, \f${\bf f}\left({\bf u}\right) \equiv {\bf u}\f$).
\b Reference:
+ Ghosh, D., Baeder, J. D., Compact Reconstruction Schemes with Weighted ENO Limiting
for Hyperbolic Conservation Laws, SIAM Journal on Scientific Computing, 34 (3), 2012, A1678–A1706,
http://dx.doi.org/10.1137/110857659
*/
int Interp1PrimFifthOrderCompactUpwind(
double *fI, /*!< Array of interpolated function values at the interfaces */
double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */
double *u, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */
double *x, /*!< Grid coordinates */
int upw, /*!< Upwind direction (left or right biased) */
int dir, /*!< Spatial dimension along which to interpolate */
void *s, /*!< Object of type #HyPar containing solver-related variables */
void *m, /*!< Object of type #MPIVariables containing MPI-related variables */
int uflag /*!< Flag to indicate if \f$f(u) \equiv u\f$, i.e., if the solution is being reconstructed */
)
{
HyPar *solver = (HyPar*) s;
MPIVariables *mpi = (MPIVariables*) m;
CompactScheme *compact= (CompactScheme*) solver->compact;
TridiagLU *lu = (TridiagLU*) solver->lusolver;
int sys,Nsys,d,v;
_DECLARE_IERR_;
int ghosts = solver->ghosts;
int ndims = solver->ndims;
int nvars = solver->nvars;
int *dim = solver->dim_local;
int *stride= solver->stride_with_ghosts;
/* define some constants */
static const double one_third = 1.0/3.0,
thirteen_by_sixty = 13.0/60.0,
fortyseven_by_sixty = 47.0/60.0,
twentyseven_by_sixty = 27.0/60.0,
one_by_twenty = 1.0/20.0,
one_by_thirty = 1.0/30.0,
nineteen_by_thirty = 19.0/30.0,
three_by_ten = 3.0/10.0,
six_by_ten = 6.0/10.0,
one_by_ten = 1.0/10.0;
/* create index and bounds for the outer loop, i.e., to loop over all 1D lines along
dimension "dir" */
int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims];
_ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1;
_ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1;
int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer);
/* calculate total number of tridiagonal systems to solve */
_ArrayProduct1D_(bounds_outer,ndims,Nsys); Nsys *= nvars;
/* get the tridiagonal system arrays from the CompactScheme work arrays */
double *A = compact->A;
double *B = compact->B;
double *C = compact->C;
double *R = compact->R;
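/* Layout note (inferred from the indexing below): A, B, C, and R each hold
   dim[dir]+1 rows of all Nsys = N_outer*nvars tridiagonal systems, stored
   interface-major, so the entry for interface i, grid line sys, and solution
   component v lives at offset Nsys*i + sys*nvars + v. */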
#pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI)
for (sys=0; sys < N_outer; sys++) {
_ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
_ArrayCopy1D_(index_outer,indexC,ndims);
_ArrayCopy1D_(index_outer,indexI,ndims);
for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
int qm1,qm2,qm3,qp1,qp2,p;
_ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
if (upw > 0) {
indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
qm3 = qm1 - 2*stride[dir];
qm2 = qm1 - stride[dir];
qp1 = qm1 + stride[dir];
qp2 = qm1 + 2*stride[dir];
} else {
indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
qm3 = qm1 + 2*stride[dir];
qm2 = qm1 + stride[dir];
qp1 = qm1 - stride[dir];
qp2 = qm1 - 2*stride[dir];
}
/* Defining stencil points */
double *fm3, *fm2, *fm1, *fp1, *fp2;
fm3 = fC+qm3*nvars;
fm2 = fC+qm2*nvars;
fm1 = fC+qm1*nvars;
fp1 = fC+qp1*nvars;
fp2 = fC+qp2*nvars;
if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 ))
|| ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) {
/* Use 5th order upwind at the physical boundaries */
_ArraySetValue_((A+Nsys*indexI[dir]+sys*nvars),nvars,0.0);
_ArraySetValue_((B+Nsys*indexI[dir]+sys*nvars),nvars,1.0);
_ArraySetValue_((C+Nsys*indexI[dir]+sys*nvars),nvars,0.0);
for (v=0; v<nvars; v++) {
(R+Nsys*indexI[dir]+sys*nvars)[v] = one_by_thirty * fm3[v]
- thirteen_by_sixty * fm2[v]
+ fortyseven_by_sixty * fm1[v]
+ twentyseven_by_sixty * fp1[v]
- one_by_twenty * fp2[v];
}
} else {
/* Use the 5th order compact upwind scheme at the interior points */
if (upw > 0) {
_ArraySetValue_((A+Nsys*indexI[dir]+sys*nvars),nvars,three_by_ten);
_ArraySetValue_((B+Nsys*indexI[dir]+sys*nvars),nvars,six_by_ten );
_ArraySetValue_((C+Nsys*indexI[dir]+sys*nvars),nvars,one_by_ten );
} else {
_ArraySetValue_((C+Nsys*indexI[dir]+sys*nvars),nvars,three_by_ten);
_ArraySetValue_((B+Nsys*indexI[dir]+sys*nvars),nvars,six_by_ten );
_ArraySetValue_((A+Nsys*indexI[dir]+sys*nvars),nvars,one_by_ten );
}
for (v=0; v<nvars; v++) {
(R+Nsys*indexI[dir]+sys*nvars)[v] = one_by_thirty * fm2[v]
+ nineteen_by_thirty * fm1[v]
+ one_third * fp1[v];
}
}
}
}
#ifdef serial
/* Solve the tridiagonal system */
IERR tridiagLU(A,B,C,R,dim[dir]+1,Nsys,lu,NULL); CHECKERR(ierr);
#else
/* Solve the tridiagonal system */
/* all processes except the last will solve without the last interface to avoid overlap */
if (mpi->ip[dir] != mpi->iproc[dir]-1) { IERR tridiagLU(A,B,C,R,dim[dir] ,Nsys,lu,&mpi->comm[dir]); CHECKERR(ierr); }
else { IERR tridiagLU(A,B,C,R,dim[dir]+1,Nsys,lu,&mpi->comm[dir]); CHECKERR(ierr); }
/* Now get the solution to the last interface from the next proc */
double *sendbuf = compact->sendbuf;
double *recvbuf = compact->recvbuf;
MPI_Request req[2] = {MPI_REQUEST_NULL,MPI_REQUEST_NULL};
if (mpi->ip[dir]) for (d=0; d<Nsys; d++) sendbuf[d] = R[d];
if (mpi->ip[dir] != mpi->iproc[dir]-1) MPI_Irecv(recvbuf,Nsys,MPI_DOUBLE,mpi->ip[dir]+1,214,mpi->comm[dir],&req[0]);
if (mpi->ip[dir]) MPI_Isend(sendbuf,Nsys,MPI_DOUBLE,mpi->ip[dir]-1,214,mpi->comm[dir],&req[1]);
MPI_Waitall(2,&req[0],MPI_STATUSES_IGNORE); /* MPI_STATUSES_IGNORE is the plural form MPI_Waitall expects */
if (mpi->ip[dir] != mpi->iproc[dir]-1) for (d=0; d<Nsys; d++) R[d+Nsys*dim[dir]] = recvbuf[d];
#endif
/* save the solution to fI */
#pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI)
for (sys=0; sys < N_outer; sys++) {
_ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
_ArrayCopy1D_(index_outer,indexI,ndims);
for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
int p; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
_ArrayCopy1D_((R+sys*nvars+Nsys*indexI[dir]),(fI+nvars*p),nvars);
}
}
return(0);
}
|
tinyexr.h | /*
Copyright (c) 2014 - 2019, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
#ifndef TINYEXR_H_
#define TINYEXR_H_
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // assume stdint.h is available (C99)
#ifdef __cplusplus
extern "C" {
#endif
// Use the embedded miniz to decode ZIP-compressed pixel data. Linking with
// zlib is required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-5) // note: shares its value with TINYEXR_ERROR_INVALID_FILE
#define TINYEXR_ERROR_CANT_OPEN_FILE (-6)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-7)
#define TINYEXR_ERROR_INVALID_HEADER (-8)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-9)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-10)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-11)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0, HALF = 1, FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
typedef struct _EXRVersion {
int version; // this must be 2
int tiled; // tile format image
int long_name; // long name attribute
int non_image; // deep image(EXR 2.0)
int multipart; // multi-part(EXR 2.0)
} EXRVersion;
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t*
int size;
int pad0;
} EXRAttribute;
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} EXRChannelInfo;
typedef struct _EXRTile {
int offset_x;
int offset_y;
int level_x;
int level_y;
int width; // actual width in a tile.
int height; // actual height in a tile.
unsigned char **images; // image[channels][pixels]
} EXRTile;
typedef struct _EXRHeader {
float pixel_aspect_ratio;
int line_order;
int data_window[4];
int display_window[4];
float screen_window_center[2];
float screen_window_width;
int chunk_count;
// Properties for tiled format(`tiledesc`).
int tiled;
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
int long_name;
int non_image;
int multipart;
unsigned int header_len;
// Custom attributes (excludes required attributes, e.g. `channels`,
// `compression`, etc.)
int num_custom_attributes;
EXRAttribute *custom_attributes; // array of EXRAttribute. size =
// `num_custom_attributes`.
EXRChannelInfo *channels; // [num_channels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel. This is overwritten with `requested_pixel_types` when
// loading.
int num_channels;
int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int *requested_pixel_types; // Filled initially by
// ParseEXRHeaderFrom(Memory|File), then users
// can edit it (only valid for HALF pixel type
// channels)
} EXRHeader;
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers;
} EXRMultiPartHeader;
typedef struct _EXRImage {
EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
// from tiles manually. NULL if scanline format.
unsigned char **images; // image[channels][pixels]. NULL if tiled format.
int width;
int height;
int num_channels;
// Properties for tile format.
int num_tiles;
} EXRImage;
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images;
} EXRMultiPartImage;
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0;
} DeepImage;
// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assumes the EXR image contains A (single-
// channel alpha) or RGB(A) channels.
// The application must free the image data returned in `out_rgba`.
// Result image format is: float x RGBA x width x height.
// Returns a negative value and may set an error string in `err` when there's
// an error.
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
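// Usage sketch (hedged; "input.exr" is a placeholder path and error handling
// is abbreviated):
//
//   float *rgba; int w, h; const char *err = NULL;
//   if (LoadEXR(&rgba, &w, &h, "input.exr", &err) != TINYEXR_SUCCESS) {
//     if (err) { fprintf(stderr, "EXR load error: %s\n", err); FreeEXRErrorMessage(err); }
//   } else {
//     // rgba holds w*h RGBA float pixels; the application frees it.
//     free(rgba);
//   }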
// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// Checks whether the given file is an EXR file (by inspecting just the header).
// @return TINYEXR_SUCCESS for an EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others.
extern int IsEXR(const char *filename);
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assumes the EXR image contains RGB(A)
// channels.
// components must be 1 (grayscale), 3 (RGB) or 4 (RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width
// x height`.
// Saves the image in fp16 (HALF) format when `save_as_fp16` is positive.
// Saves the image in fp32 (FLOAT) format when `save_as_fp16` is 0.
// Uses ZIP compression by default.
// Returns a negative value and may set an error string in `err` when there's
// an error.
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename, const char **err);
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Frees internal data of the EXRHeader struct.
extern int FreeEXRHeader(EXRHeader *exr_header);
// Frees internal data of the EXRImage struct.
extern int FreeEXRImage(EXRImage *exr_image);
// Frees an error message.
extern void FreeEXRErrorMessage(const char *msg);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When an error message is returned, the application must free `err` with
// FreeEXRErrorMessage().
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from memory and initialize `EXRHeader`.
// When an error message is returned, the application must free `err` with
// FreeEXRErrorMessage().
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When an error message is returned, the application must free `err` with
// FreeEXRErrorMessage().
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from memory and initialize `EXRHeader*`
// array.
// When an error message is returned, the application must free `err` with
// FreeEXRErrorMessage().
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// The application must set up `EXRHeader` with `ParseEXRHeaderFromFile` before
// calling this function.
// The application can free the EXRImage using `FreeEXRImage`.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is returned, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
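// Usage sketch (hedged; "input.exr" is a placeholder path and return codes
// should be checked in real code): the non-deprecated loading sequence is
// version -> header -> image, optionally requesting FLOAT output for HALF
// channels before the load, then freeing everything afterwards.
//
//   EXRVersion version; EXRHeader header; EXRImage image;
//   const char *err = NULL;
//   ParseEXRVersionFromFile(&version, "input.exr");
//   InitEXRHeader(&header);
//   ParseEXRHeaderFromFile(&header, &version, "input.exr", &err);
//   for (int c = 0; c < header.num_channels; c++)
//     header.requested_pixel_types[c] = TINYEXR_PIXELTYPE_FLOAT;
//   InitEXRImage(&image);
//   LoadEXRImageFromFile(&image, &header, "input.exr", &err);
//   // ... read image.images[channel][pixel] ...
//   FreeEXRImage(&image);
//   FreeEXRHeader(&header);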
// Loads single-part OpenEXR image from memory.
// The application must set up `EXRHeader` with `ParseEXRHeaderFromMemory`
// before calling this function.
// The application can free the EXRImage using `FreeEXRImage`.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is returned, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// The application must set up the headers with
// `ParseEXRMultipartHeaderFromFile` before calling this function.
// The application can free the EXRImages using `FreeEXRImage`.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is returned, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from memory.
// The application must set up the `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// The application can free the EXRImages using `FreeEXRImage`.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is returned, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is returned, the application must free `err` with
// FreeEXRErrorMessage().
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to memory.
// Image is compressed using the compression type in `EXRHeader`
// (`compression_type`).
// Returns the number of bytes on success.
// Returns zero and sets an error string in `err` when there's an
// error.
// When an error message is returned, the application must free `err` with
// FreeEXRErrorMessage().
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// The application must free the members of DeepImage (image, offset_table).
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is returned, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// The application must free the members of DeepImage (image, offset_table).
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assumes the EXR image
// contains RGB(A) channels.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is returned, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
//#include <iostream> // debug
#include <limits>
#include <string>
#include <vector>
#if __cplusplus > 199711L
// C++11
#include <cstdint>
#endif // __cplusplus > 199711L
#ifdef _OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#include "zfp.h"
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
#if TINYEXR_USE_MINIZ
namespace miniz {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"
#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif
#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#if __has_warning("-Wtautological-constant-compare")
#pragma clang diagnostic ignored "-Wtautological-constant-compare"
#endif
#endif
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks kahmyong.moon@hp.com) which could cause locate files to not find
files. This bug
would only have occurred in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repo.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
failed
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <bruced@valvesoftware.com> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a Core i7 (actual throughput varies
depending on the type of data, and x64 vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's were designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
- Archive file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
const char *pArchive_name,
const void *pBuf, size_t buf_size, const void *pComment, mz_uint16
comment_size, mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to
preserve into a new archive using the
mz_zip_writer_add_from_zip_reader() function (which compiles the
compressed file data as-is). When you're done, delete the old archive and
rename the newly written archive, and
you're done. This is safe but requires a bunch of temporary disk space or
heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
//#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive
// API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h'" when compiling with
// tcc on Linux.
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
// Compression strategies.
enum {
MZ_DEFAULT_STRATEGY = 0,
MZ_FILTERED = 1,
MZ_HUFFMAN_ONLY = 2,
MZ_RLE = 3,
MZ_FIXED = 4
};
// Method
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
size_t size);
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
MZ_NO_FLUSH = 0,
MZ_PARTIAL_FLUSH = 1,
MZ_SYNC_FLUSH = 2,
MZ_FULL_FLUSH = 3,
MZ_FINISH = 4,
MZ_BLOCK = 5
};
// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
MZ_OK = 0,
MZ_STREAM_END = 1,
MZ_NEED_DICT = 2,
MZ_ERRNO = -1,
MZ_STREAM_ERROR = -2,
MZ_DATA_ERROR = -3,
MZ_MEM_ERROR = -4,
MZ_BUF_ERROR = -5,
MZ_VERSION_ERROR = -6,
MZ_PARAM_ERROR = -10000
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
MZ_NO_COMPRESSION = 0,
MZ_BEST_SPEED = 1,
MZ_BEST_COMPRESSION = 9,
MZ_UBER_COMPRESSION = 10,
MZ_DEFAULT_LEVEL = 6,
MZ_DEFAULT_COMPRESSION = -1
};
// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct.
typedef struct mz_stream_s {
const unsigned char *next_in; // pointer to next byte to read
unsigned int avail_in; // number of bytes available at next_in
mz_ulong total_in; // total number of bytes consumed so far
unsigned char *next_out; // pointer to next byte to write
unsigned int avail_out; // number of bytes that can be written to next_out
mz_ulong total_out; // total number of bytes produced so far
char *msg; // error msg (unused)
struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
mz_alloc_func
zalloc; // optional heap allocation function (defaults to malloc)
mz_free_func zfree; // optional heap free function (defaults to free)
void *opaque; // heap alloc function user pointer
int data_type; // data_type (unused)
mz_ulong adler; // adler32 of the source or uncompressed data
mz_ulong reserved; // not used
} mz_stream;
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
// pStream must point to an initialized mz_stream struct.
// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
// level 1 enables a specially optimized compression function that's been
// optimized purely for performance, not ratio.
// (This special func. is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflate(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
// mz_deflateEnd() deinitializes a compressor:
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
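// Usage sketch (single-call round trip; buffer sizes are illustrative):
//
//   const unsigned char src[] = "hello hello hello hello";
//   unsigned char comp[256], decomp[64];
//   mz_ulong comp_len = sizeof(comp);   // or size it with mz_compressBound(sizeof(src))
//   if (mz_compress(comp, &comp_len, src, sizeof(src)) == MZ_OK) {
//     mz_ulong decomp_len = sizeof(decomp);
//     if (mz_uncompress(decomp, &decomp_len, comp, comp_len) == MZ_OK) {
//       // decomp_len == sizeof(src) and decomp matches src byte for byte.
//     }
//   }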
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
enum {
MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};
typedef struct {
mz_uint32 m_file_index;
mz_uint32 m_central_dir_ofs;
mz_uint16 m_version_made_by;
mz_uint16 m_version_needed;
mz_uint16 m_bit_flag;
mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
time_t m_time;
#endif
mz_uint32 m_crc32;
mz_uint64 m_comp_size;
mz_uint64 m_uncomp_size;
mz_uint16 m_internal_attr;
mz_uint32 m_external_attr;
mz_uint64 m_local_header_ofs;
mz_uint32 m_comment_size;
char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n);
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
typedef enum {
MZ_ZIP_MODE_INVALID = 0,
MZ_ZIP_MODE_READING = 1,
MZ_ZIP_MODE_WRITING = 2,
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;
typedef struct mz_zip_archive_tag {
mz_uint64 m_archive_size;
mz_uint64 m_central_directory_file_ofs;
mz_uint m_total_files;
mz_zip_mode m_zip_mode;
mz_uint m_file_offset_alignment;
mz_alloc_func m_pAlloc;
mz_free_func m_pFree;
mz_realloc_func m_pRealloc;
void *m_pAlloc_opaque;
mz_file_read_func m_pRead;
mz_file_write_func m_pWrite;
void *m_pIO_opaque;
mz_zip_internal_state *m_pState;
} mz_zip_archive;
typedef enum {
MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size);
// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags);
// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags);
// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
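// Example (a hedged sketch of the typical read sequence; "archive.zip" is a
// placeholder filename):
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (!mz_zip_reader_init_file(&zip, "archive.zip", 0)) return;
//   mz_uint i, n = mz_zip_reader_get_num_files(&zip);
//   for (i = 0; i < n; i++) {
//     mz_zip_archive_file_stat st;
//     size_t size;
//     void *p;
//     if (!mz_zip_reader_file_stat(&zip, i, &st)) continue;
//     p = mz_zip_reader_extract_to_heap(&zip, i, &size, 0);
//     // use st.m_filename and p[0 .. size) here; p is NULL on failure
//     mz_free(p);
//   }
//   mz_zip_reader_end(&zip);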
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash and an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
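// Example (a hedged sketch of the write sequence; note the mandatory finalize
// step before ending; "out.zip" is a placeholder filename):
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (!mz_zip_writer_init_file(&zip, "out.zip", 0)) return;
//   const char *text = "payload";
//   mz_zip_writer_add_mem(&zip, "dir/file.txt", text, strlen(text),
//                         MZ_DEFAULT_LEVEL);
//   mz_zip_writer_finalize_archive(&zip);  // required for a valid archive
//   mz_zip_writer_end(&zip);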
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags);
// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
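// Example (a hedged sketch of the two helpers above; filenames are
// placeholders):
//
//   mz_zip_add_mem_to_archive_file_in_place("log.zip", "entry.txt", "data",
//                                           4, NULL, 0, MZ_DEFAULT_LEVEL);
//   size_t size;
//   void *p = mz_zip_extract_archive_file_to_heap("log.zip", "entry.txt",
//                                                 &size, 0);
//   mz_free(p);  // p is NULL on failure; mz_free(NULL) is a no-op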
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
TINFL_FLAG_HAS_MORE_INPUT = 2,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
TINFL_FLAG_COMPUTE_ADLER32 = 8
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status.
typedef enum {
TINFL_STATUS_BAD_PARAM = -3,
TINFL_STATUS_ADLER32_MISMATCH = -2,
TINFL_STATUS_FAILED = -1,
TINFL_STATUS_DONE = 0,
TINFL_STATUS_NEEDS_MORE_INPUT = 1,
TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;
// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
do { \
(r)->m_state = 0; \
} \
MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build
// any desired higher level decompression API. In the limit case, it can be
// called once per byte of input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags);
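// Example (a hedged sketch of driving the coroutine with the whole input in
// memory and a wrapping 32KB dictionary, following the same pattern the
// high-level helpers use internally; in_buf/in_len are illustrative):
//
//   tinfl_decompressor decomp;
//   mz_uint8 dict[TINFL_LZ_DICT_SIZE];
//   size_t dict_ofs = 0, in_ofs = 0;
//   tinfl_init(&decomp);
//   for (;;) {
//     size_t in_size = in_len - in_ofs, out_size = sizeof(dict) - dict_ofs;
//     tinfl_status st = tinfl_decompress(
//         &decomp, in_buf + in_ofs, &in_size, dict, dict + dict_ofs,
//         &out_size, TINFL_FLAG_PARSE_ZLIB_HEADER);
//     in_ofs += in_size;
//     // flush dict[dict_ofs .. dict_ofs + out_size) to the consumer here
//     dict_ofs = (dict_ofs + out_size) & (TINFL_LZ_DICT_SIZE - 1);
//     if (st <= TINFL_STATUS_DONE) break;  // TINFL_STATUS_DONE or an error
//   }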
// Internal/private bits follow.
enum {
TINFL_MAX_HUFF_TABLES = 3,
TINFL_MAX_HUFF_SYMBOLS_0 = 288,
TINFL_MAX_HUFF_SYMBOLS_1 = 32,
TINFL_MAX_HUFF_SYMBOLS_2 = 19,
TINFL_FAST_LOOKUP_BITS = 10,
TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
typedef struct {
mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
struct tinfl_decompressor_tag {
mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
m_check_adler32, m_dist, m_counter, m_num_extra,
m_table_sizes[TINFL_MAX_HUFF_TABLES];
tinfl_bit_buf_t m_bit_buf;
size_t m_dist_from_out_buf_start;
tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
mz_uint8 m_raw_header[4],
m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
TDEFL_HUFFMAN_ONLY = 0,
TDEFL_DEFAULT_MAX_PROBES = 128,
TDEFL_MAX_PROBES_MASK = 0xFFF
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
TDEFL_WRITE_ZLIB_HEADER = 0x01000,
TDEFL_COMPUTE_ADLER32 = 0x02000,
TDEFL_GREEDY_PARSING_FLAG = 0x04000,
TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
TDEFL_RLE_MATCHES = 0x10000,
TDEFL_FILTER_MATCHES = 0x20000,
TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. More probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
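// Example (illustrative; src/src_len are caller-supplied, and per the note
// above the returned heap block is released with free()):
//
//   size_t out_len;
//   void *p = tdefl_compress_mem_to_heap(
//       src, src_len, &out_len,
//       TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
//   // p[0 .. out_len) is a zlib stream on success; p is NULL on failure
//   free(p);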
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc., or MZ_DEFAULT_LEVEL as a decent default.
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out);
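// Example (a hedged sketch; a 2x2 RGB image with the default level):
//
//   unsigned char rgb[2 * 2 * 3] = {0};
//   size_t png_len;
//   void *png =
//       tdefl_write_image_to_png_file_in_memory(rgb, 2, 2, 3, &png_len);
//   // write png[0 .. png_len) to disk, then release the heap block
//   mz_free(png);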
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called with TDEFL_OUT_BUF_SIZE bytes at
// a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
enum {
TDEFL_MAX_HUFF_TABLES = 3,
TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
TDEFL_LZ_DICT_SIZE = 32768,
TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
TDEFL_MIN_MATCH_LEN = 3,
TDEFL_MAX_MATCH_LEN = 258
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 12,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 15,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
TDEFL_STATUS_BAD_PARAM = -2,
TDEFL_STATUS_PUT_BUF_FAILED = -1,
TDEFL_STATUS_OKAY = 0,
TDEFL_STATUS_DONE = 1
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
TDEFL_NO_FLUSH = 0,
TDEFL_SYNC_FLUSH = 2,
TDEFL_FULL_FLUSH = 3,
TDEFL_FINISH = 4
} tdefl_flush;
// tdefl's compression state structure.
typedef struct {
tdefl_put_buf_func_ptr m_pPut_buf_func;
void *m_pPut_buf_user;
mz_uint m_flags, m_max_probes[2];
int m_greedy_parsing;
mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
m_bit_buffer;
mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
m_wants_to_finish;
tdefl_status m_prev_return_status;
const void *m_pIn_buf;
void *m_pOut_buf;
size_t *m_pIn_buf_size, *m_pOut_buf_size;
tdefl_flush m_flush;
const mz_uint8 *m_pSrc;
size_t m_src_buf_left, m_out_buf_ofs;
mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl APIs do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer()
// API for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush);
// tdefl_compress_buffer() is only usable when tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush);
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
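// Example (a hedged sketch of the callback-driven path; my_write and
// my_user_ptr are hypothetical caller-supplied names, and src/src_len are
// illustrative):
//
//   tdefl_compressor d;  // large object (several hundred KB); heap
//                        // allocation is typical
//   if (tdefl_init(&d, my_write, my_user_ptr,
//                  TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES) ==
//       TDEFL_STATUS_OKAY) {
//     tdefl_compress_buffer(&d, src, src_len, TDEFL_FINISH);
//   }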
// tdefl_create_comp_flags_from_zip_params() can't be used if
// MINIZ_NO_ZLIB_APIS is defined, because it uses some of the zlib-style
// macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
//#include <assert.h>
//#include <string.h>
#define MZ_ASSERT(x) assert(x)
#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
size_t block_len = buf_len % 5552;
if (!ptr) return MZ_ADLER32_INIT;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
return (s2 << 16) + s1;
}
// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
static const mz_uint32 s_crc32[16] = {
0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
mz_uint32 crcu32 = (mz_uint32)crc;
if (!ptr) return MZ_CRC32_INIT;
crcu32 = ~crcu32;
while (buf_len--) {
mz_uint8 b = *ptr++;
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
}
return ~crcu32;
}
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
(void)opaque, (void)items, (void)size;
return MZ_MALLOC(items * size);
}
static void def_free_func(void *opaque, void *address) {
(void)opaque, (void)address;
MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
// (void)opaque, (void)address, (void)items, (void)size;
// return MZ_REALLOC(address, items * size);
//}
const char *mz_version(void) { return MZ_VERSION; }
int mz_deflateInit(mz_streamp pStream, int level) {
return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
MZ_DEFAULT_STRATEGY);
}
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy) {
tdefl_compressor *pComp;
mz_uint comp_flags =
TDEFL_COMPUTE_ADLER32 |
tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
if (!pStream) return MZ_STREAM_ERROR;
if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS)))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = MZ_ADLER32_INIT;
pStream->msg = NULL;
pStream->reserved = 0;
pStream->total_in = 0;
pStream->total_out = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
sizeof(tdefl_compressor));
if (!pComp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pComp;
if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
mz_deflateEnd(pStream);
return MZ_PARAM_ERROR;
}
return MZ_OK;
}
int mz_deflateReset(mz_streamp pStream) {
if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
(!pStream->zfree))
return MZ_STREAM_ERROR;
pStream->total_in = pStream->total_out = 0;
tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
((tdefl_compressor *)pStream->state)->m_flags);
return MZ_OK;
}
int mz_deflate(mz_streamp pStream, int flush) {
size_t in_bytes, out_bytes;
mz_ulong orig_total_in, orig_total_out;
int mz_status = MZ_OK;
if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
(!pStream->next_out))
return MZ_STREAM_ERROR;
if (!pStream->avail_out) return MZ_BUF_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
TDEFL_STATUS_DONE)
return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
orig_total_in = pStream->total_in;
orig_total_out = pStream->total_out;
for (;;) {
tdefl_status defl_status;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
pStream->next_in, &in_bytes, pStream->next_out,
&out_bytes, (tdefl_flush)flush);
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (defl_status < 0) {
mz_status = MZ_STREAM_ERROR;
break;
} else if (defl_status == TDEFL_STATUS_DONE) {
mz_status = MZ_STREAM_END;
break;
} else if (!pStream->avail_out)
break;
else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
if ((flush) || (pStream->total_in != orig_total_in) ||
(pStream->total_out != orig_total_out))
break;
return MZ_BUF_ERROR; // Can't make forward progress without some input.
}
}
return mz_status;
}
int mz_deflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
(void)pStream;
// This is really overly conservative. (And lame, but it's actually pretty
// tricky to compute a true upper bound given the way tdefl's blocking works.)
return MZ_MAX(128 + (source_len * 110) / 100,
128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level) {
int status;
mz_stream stream;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_deflateInit(&stream, level);
if (status != MZ_OK) return status;
status = mz_deflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_deflateEnd(&stream);
return (status == MZ_OK) ? MZ_BUF_ERROR : status;
}
*pDest_len = stream.total_out;
return mz_deflateEnd(&stream);
}
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
return mz_compress2(pDest, pDest_len, pSource, source_len,
MZ_DEFAULT_COMPRESSION);
}
mz_ulong mz_compressBound(mz_ulong source_len) {
return mz_deflateBound(NULL, source_len);
}
typedef struct {
tinfl_decompressor m_decomp;
mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
int m_window_bits;
mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
tinfl_status m_last_status;
} inflate_state;
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
inflate_state *pDecomp;
if (!pStream) return MZ_STREAM_ERROR;
if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = 0;
pStream->msg = NULL;
pStream->total_in = 0;
pStream->total_out = 0;
pStream->reserved = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
sizeof(inflate_state));
if (!pDecomp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pDecomp;
tinfl_init(&pDecomp->m_decomp);
pDecomp->m_dict_ofs = 0;
pDecomp->m_dict_avail = 0;
pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
pDecomp->m_first_call = 1;
pDecomp->m_has_flushed = 0;
pDecomp->m_window_bits = window_bits;
return MZ_OK;
}
int mz_inflateInit(mz_streamp pStream) {
return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
int mz_inflate(mz_streamp pStream, int flush) {
inflate_state *pState;
mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
size_t in_bytes, out_bytes, orig_avail_in;
tinfl_status status;
if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
return MZ_STREAM_ERROR;
pState = (inflate_state *)pStream->state;
if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
orig_avail_in = pStream->avail_in;
first_call = pState->m_first_call;
pState->m_first_call = 0;
if (pState->m_last_status < 0) return MZ_DATA_ERROR;
if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
pState->m_has_flushed |= (flush == MZ_FINISH);
if ((flush == MZ_FINISH) && (first_call)) {
// MZ_FINISH on the first call implies that the input and output buffers are
// large enough to hold the entire compressed/decompressed file.
decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
pStream->next_out, pStream->next_out, &out_bytes,
decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (status < 0)
return MZ_DATA_ERROR;
else if (status != TINFL_STATUS_DONE) {
pState->m_last_status = TINFL_STATUS_FAILED;
return MZ_BUF_ERROR;
}
return MZ_STREAM_END;
}
// If flush != MZ_FINISH then we must assume there's more input.
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
if (pState->m_dict_avail) {
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
return ((pState->m_last_status == TINFL_STATUS_DONE) &&
(!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
for (;;) {
in_bytes = pStream->avail_in;
out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
status = tinfl_decompress(
&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pState->m_dict_avail = (mz_uint)out_bytes;
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
if (status < 0)
return MZ_DATA_ERROR; // Stream is corrupted (there could be some
// uncompressed data left in the output dictionary -
// oh well).
else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
// without supplying more input or by setting flush
// to MZ_FINISH.
else if (flush == MZ_FINISH) {
// The output buffer MUST be large enough to hold the remaining uncompressed
// data when flush==MZ_FINISH.
if (status == TINFL_STATUS_DONE)
return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
// status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
// at least 1 more byte on the way. If there's no more room left in the
// output buffer then something is wrong.
else if (!pStream->avail_out)
return MZ_BUF_ERROR;
} else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
(!pStream->avail_out) || (pState->m_dict_avail))
break;
}
return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
int mz_inflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
mz_stream stream;
int status;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_inflateInit(&stream);
if (status != MZ_OK) return status;
status = mz_inflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_inflateEnd(&stream);
return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
: status;
}
*pDest_len = stream.total_out;
return mz_inflateEnd(&stream);
}
const char *mz_error(int err) {
static struct {
int m_err;
const char *m_pDesc;
} s_error_descs[] = {{MZ_OK, ""},
{MZ_STREAM_END, "stream end"},
{MZ_NEED_DICT, "need dictionary"},
{MZ_ERRNO, "file error"},
{MZ_STREAM_ERROR, "stream error"},
{MZ_DATA_ERROR, "data error"},
{MZ_MEM_ERROR, "out of memory"},
{MZ_BUF_ERROR, "buf error"},
{MZ_VERSION_ERROR, "version error"},
{MZ_PARAM_ERROR, "parameter error"}};
mz_uint i;
for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from all
// compression API's)
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
#define TINFL_CR_BEGIN \
switch (r->m_state) { \
case 0:
#define TINFL_CR_RETURN(state_index, result) \
do { \
status = result; \
r->m_state = state_index; \
goto common_exit; \
case state_index:; \
} \
MZ_MACRO_END
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
do { \
for (;;) { \
TINFL_CR_RETURN(state_index, result); \
} \
} \
MZ_MACRO_END
#define TINFL_CR_FINISH }
// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never reads ahead more than it needs to. Currently
// TINFL_GET_BYTE() pads the end of the stream with 0's in this scenario.
#define TINFL_GET_BYTE(state_index, c) \
do { \
if (pIn_buf_cur >= pIn_buf_end) { \
for (;;) { \
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
if (pIn_buf_cur < pIn_buf_end) { \
c = *pIn_buf_cur++; \
break; \
} \
} else { \
c = 0; \
break; \
} \
} \
} else \
c = *pIn_buf_cur++; \
} \
MZ_MACRO_END
#define TINFL_NEED_BITS(state_index, n) \
do { \
mz_uint c; \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
#define TINFL_GET_BITS(state_index, b, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
b = bit_buf & ((1 << (n)) - 1); \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream to decode the next Huffman
// code (and absolutely no more). It works by trying to fully decode a Huffman
// code using whatever bits are currently present in the bit buffer. If this
// fails, it reads another byte, and tries again until it succeeds or until the
// bit buffer contains >=15 bits (deflate's max. Huffman code size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
do { \
temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
if (temp >= 0) { \
code_len = temp >> 9; \
if ((code_len) && (num_bits >= code_len)) break; \
} else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while ((temp < 0) && (num_bits >= (code_len + 1))); \
if (temp >= 0) break; \
} \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the
// decompressor to never read beyond the final byte of the deflate stream. (In
// other words, when this macro wants to read another byte from the input, it
// REALLY needs another byte in order to fully decode the next Huffman code.)
// Handling this properly is particularly important on raw deflate (non-zlib)
// streams, which aren't followed by a byte aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
do { \
int temp; \
mz_uint code_len, c; \
if (num_bits < 15) { \
if ((pIn_buf_end - pIn_buf_cur) < 2) { \
TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
} else { \
bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
(((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
pIn_buf_cur += 2; \
num_bits += 16; \
} \
} \
if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
0) \
code_len = temp >> 9, temp &= 511; \
else { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while (temp < 0); \
} \
sym = temp; \
bit_buf >>= code_len; \
num_bits -= code_len; \
} \
MZ_MACRO_END
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags) {
static const int s_length_base[31] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
4, 4, 5, 5, 5, 5, 0, 0, 0};
static const int s_dist_base[32] = {
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0};
static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
static const mz_uint8 s_length_dezigzag[19] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static const int s_min_table_sizes[3] = {257, 1, 4};
tinfl_status status = TINFL_STATUS_FAILED;
mz_uint32 num_bits, dist, counter, num_extra;
tinfl_bit_buf_t bit_buf;
const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end =
pIn_buf_next + *pIn_buf_size;
mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end =
pOut_buf_next + *pOut_buf_size;
size_t out_buf_size_mask =
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
? (size_t)-1
: ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
dist_from_out_buf_start;
// Ensure the output buffer's size is a power of 2, unless the output buffer
// is large enough to hold the entire output file (in which case it doesn't
// matter).
if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
(pOut_buf_next < pOut_buf_start)) {
*pIn_buf_size = *pOut_buf_size = 0;
return TINFL_STATUS_BAD_PARAM;
}
num_bits = r->m_num_bits;
bit_buf = r->m_bit_buf;
dist = r->m_dist;
counter = r->m_counter;
num_extra = r->m_num_extra;
dist_from_out_buf_start = r->m_dist_from_out_buf_start;
TINFL_CR_BEGIN
bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
r->m_z_adler32 = r->m_check_adler32 = 1;
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_GET_BYTE(1, r->m_zhdr0);
TINFL_GET_BYTE(2, r->m_zhdr1);
counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
(r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
((out_buf_size_mask + 1) <
(size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
if (counter) {
TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
}
}
do {
TINFL_GET_BITS(3, r->m_final, 3);
r->m_type = r->m_final >> 1;
if (r->m_type == 0) {
TINFL_SKIP_BITS(5, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
if (num_bits)
TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
else
TINFL_GET_BYTE(7, r->m_raw_header[counter]);
}
if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
(mz_uint)(0xFFFF ^
(r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
}
while ((counter) && (num_bits)) {
TINFL_GET_BITS(51, dist, 8);
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)dist;
counter--;
}
while (counter) {
size_t n;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
}
while (pIn_buf_cur >= pIn_buf_end) {
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
} else {
TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
}
}
n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
(size_t)(pIn_buf_end - pIn_buf_cur)),
counter);
TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
pIn_buf_cur += n;
pOut_buf_cur += n;
counter -= (mz_uint)n;
}
} else if (r->m_type == 3) {
TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
} else {
if (r->m_type == 1) {
mz_uint8 *p = r->m_tables[0].m_code_size;
mz_uint i;
r->m_table_sizes[0] = 288;
r->m_table_sizes[1] = 32;
TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
} else {
for (counter = 0; counter < 3; counter++) {
TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
r->m_table_sizes[counter] += s_min_table_sizes[counter];
}
MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
mz_uint s;
TINFL_GET_BITS(14, s, 3);
r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
}
r->m_table_sizes[2] = 19;
}
for (; (int)r->m_type >= 0; r->m_type--) {
int tree_next, tree_cur;
tinfl_huff_table *pTable;
mz_uint i, j, used_syms, total, sym_index, next_code[17],
total_syms[16];
pTable = &r->m_tables[r->m_type];
MZ_CLEAR_OBJ(total_syms);
MZ_CLEAR_OBJ(pTable->m_look_up);
MZ_CLEAR_OBJ(pTable->m_tree);
for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
total_syms[pTable->m_code_size[i]]++;
used_syms = 0, total = 0;
next_code[0] = next_code[1] = 0;
for (i = 1; i <= 15; ++i) {
used_syms += total_syms[i];
next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
}
if ((65536 != total) && (used_syms > 1)) {
TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
}
for (tree_next = -1, sym_index = 0;
sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
mz_uint rev_code = 0, l, cur_code,
code_size = pTable->m_code_size[sym_index];
if (!code_size) continue;
cur_code = next_code[code_size]++;
for (l = code_size; l > 0; l--, cur_code >>= 1)
rev_code = (rev_code << 1) | (cur_code & 1);
if (code_size <= TINFL_FAST_LOOKUP_BITS) {
mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
pTable->m_look_up[rev_code] = k;
rev_code += (1 << code_size);
}
continue;
}
if (0 ==
(tree_cur = pTable->m_look_up[rev_code &
(TINFL_FAST_LOOKUP_SIZE - 1)])) {
pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
(mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
}
rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
tree_cur -= ((rev_code >>= 1) & 1);
if (!pTable->m_tree[-tree_cur - 1]) {
pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
} else
tree_cur = pTable->m_tree[-tree_cur - 1];
}
tree_cur -= ((rev_code >>= 1) & 1);
pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
}
if (r->m_type == 2) {
for (counter = 0;
counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
mz_uint s;
TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
if (dist < 16) {
r->m_len_codes[counter++] = (mz_uint8)dist;
continue;
}
if ((dist == 16) && (!counter)) {
TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
}
num_extra = "\02\03\07"[dist - 16];
TINFL_GET_BITS(18, s, num_extra);
s += "\03\03\013"[dist - 16];
TINFL_MEMSET(r->m_len_codes + counter,
(dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
counter += s;
}
if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
}
TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
r->m_table_sizes[0]);
TINFL_MEMCPY(r->m_tables[1].m_code_size,
r->m_len_codes + r->m_table_sizes[0],
r->m_table_sizes[1]);
}
}
for (;;) {
mz_uint8 *pSrc;
for (;;) {
if (((pIn_buf_end - pIn_buf_cur) < 4) ||
((pOut_buf_end - pOut_buf_cur) < 2)) {
TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
if (counter >= 256) break;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)counter;
} else {
int sym2;
mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
if (num_bits < 30) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 4;
num_bits += 32;
}
#else
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
counter = sym2;
bit_buf >>= code_len;
num_bits -= code_len;
if (counter & 256) break;
#if !TINFL_USE_64BIT_BITBUF
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
bit_buf >>= code_len;
num_bits -= code_len;
pOut_buf_cur[0] = (mz_uint8)counter;
if (sym2 & 256) {
pOut_buf_cur++;
counter = sym2;
break;
}
pOut_buf_cur[1] = (mz_uint8)sym2;
pOut_buf_cur += 2;
}
}
if ((counter &= 511) == 256) break;
num_extra = s_length_extra[counter - 257];
counter = s_length_base[counter - 257];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(25, extra_bits, num_extra);
counter += extra_bits;
}
TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
num_extra = s_dist_extra[dist];
dist = s_dist_base[dist];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(27, extra_bits, num_extra);
dist += extra_bits;
}
dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
if ((dist > dist_from_out_buf_start) &&
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
}
pSrc = pOut_buf_start +
((dist_from_out_buf_start - dist) & out_buf_size_mask);
if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
while (counter--) {
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ =
pOut_buf_start[(dist_from_out_buf_start++ - dist) &
out_buf_size_mask];
}
continue;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
else if ((counter >= 9) && (counter <= dist)) {
const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
do {
((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
pOut_buf_cur += 8;
} while ((pSrc += 8) < pSrc_end);
if ((counter &= 7) < 3) {
if (counter) {
pOut_buf_cur[0] = pSrc[0];
if (counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
continue;
}
}
#endif
do {
pOut_buf_cur[0] = pSrc[0];
pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur[2] = pSrc[2];
pOut_buf_cur += 3;
pSrc += 3;
} while ((int)(counter -= 3) > 2);
if ((int)counter > 0) {
pOut_buf_cur[0] = pSrc[0];
if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
}
}
} while (!(r->m_final & 1));
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_SKIP_BITS(32, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
mz_uint s;
if (num_bits)
TINFL_GET_BITS(41, s, 8);
else
TINFL_GET_BYTE(42, s);
r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
}
}
TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
TINFL_CR_FINISH
common_exit:
r->m_num_bits = num_bits;
r->m_bit_buf = bit_buf;
r->m_dist = dist;
r->m_counter = counter;
r->m_num_extra = num_extra;
r->m_dist_from_out_buf_start = dist_from_out_buf_start;
*pIn_buf_size = pIn_buf_cur - pIn_buf_next;
*pOut_buf_size = pOut_buf_cur - pOut_buf_next;
if ((decomp_flags &
(TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
(status >= 0)) {
const mz_uint8 *ptr = pOut_buf_next;
size_t buf_len = *pOut_buf_size;
mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
s2 = r->m_check_adler32 >> 16;
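    // 5552 is the largest n for which 255*n*(n+1)/2 + (n+1)*(65521-1) stays
    // below 2^32, so s1/s2 can be accumulated in 32-bit registers between
    // modulo reductions (the same NMAX constant zlib uses).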
size_t block_len = buf_len % 5552;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
r->m_check_adler32 = (s2 << 16) + s1;
if ((status == TINFL_STATUS_DONE) &&
(decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
(r->m_check_adler32 != r->m_z_adler32))
status = TINFL_STATUS_ADLER32_MISMATCH;
}
return status;
}
// ------------------- Higher level helper functions.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tinfl_decompressor decomp;
void *pBuf = NULL, *pNew_buf;
size_t src_buf_ofs = 0, out_buf_capacity = 0;
*pOut_len = 0;
tinfl_init(&decomp);
for (;;) {
size_t src_buf_size = src_buf_len - src_buf_ofs,
dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
tinfl_status status = tinfl_decompress(
&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
(mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
&dst_buf_size,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
src_buf_ofs += src_buf_size;
*pOut_len += dst_buf_size;
if (status == TINFL_STATUS_DONE) break;
new_out_buf_capacity = out_buf_capacity * 2;
if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
if (!pNew_buf) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
pBuf = pNew_buf;
out_buf_capacity = new_out_buf_capacity;
}
return pBuf;
}
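// A minimal usage sketch for tinfl_decompress_mem_to_heap(): one call
// decompresses a whole zlib-framed buffer, growing the output as needed. The
// pComp/comp_len parameters are hypothetical placeholders; the block is
// compiled out with #if 0 so it doesn't affect the build.
#if 0
static void example_inflate_to_heap(const void *pComp, size_t comp_len) {
  size_t out_len = 0;
  // Pass 0 instead of TINFL_FLAG_PARSE_ZLIB_HEADER for raw DEFLATE streams.
  void *pOut = tinfl_decompress_mem_to_heap(pComp, comp_len, &out_len,
                                            TINFL_FLAG_PARSE_ZLIB_HEADER);
  if (pOut) {
    // ... use out_len bytes at pOut ...
    mz_free(pOut);
  }
}
#endif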
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tinfl_decompressor decomp;
tinfl_status status;
tinfl_init(&decomp);
status =
tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
(mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
: out_buf_len;
}
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
int result = 0;
tinfl_decompressor decomp;
mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
size_t in_buf_ofs = 0, dict_ofs = 0;
if (!pDict) return TINFL_STATUS_FAILED;
tinfl_init(&decomp);
for (;;) {
size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
tinfl_status status =
tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
&in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
(flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
in_buf_ofs += in_buf_size;
if ((dst_buf_size) &&
(!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
break;
if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
result = (status == TINFL_STATUS_DONE);
break;
}
dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
}
MZ_FREE(pDict);
*pIn_buf_size = in_buf_ofs;
return result;
}
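// Sketch of the callback-driven path above: output is flushed through the
// user callback one dictionary window at a time, so the total output size
// never has to be known in advance. example_put_cb/example_decompressed_size
// are hypothetical names; compiled out with #if 0.
#if 0
static int example_put_cb(const void *pBuf, int len, void *pUser) {
  *(size_t *)pUser += (size_t)len;  // e.g. fwrite(pBuf, 1, len, f) instead
  (void)pBuf;
  return 1;  // return nonzero to keep decompression going
}
static size_t example_decompressed_size(const void *pComp, size_t comp_len) {
  size_t total = 0, in_size = comp_len;
  tinfl_decompress_mem_to_callback(pComp, &in_size, example_put_cb, &total,
                                   TINFL_FLAG_PARSE_ZLIB_HEADER);
  return total;
}
#endif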
// ------------------- Low-level Compression (independent of all decompression
// APIs)
// Purposely making these tables static for faster init and thread safety.
static const mz_uint16 s_tdefl_len_sym[256] = {
257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
285};
static const mz_uint8 s_tdefl_len_extra[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8,
8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
typedef struct {
mz_uint16 m_key, m_sym_index;
} tdefl_sym_freq;
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
tdefl_sym_freq *pSyms0,
tdefl_sym_freq *pSyms1) {
mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
MZ_CLEAR_OBJ(hist);
for (i = 0; i < num_syms; i++) {
mz_uint freq = pSyms0[i].m_key;
hist[freq & 0xFF]++;
hist[256 + ((freq >> 8) & 0xFF)]++;
}
while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
total_passes--;
for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) {
const mz_uint32 *pHist = &hist[pass << 8];
mz_uint offsets[256], cur_ofs = 0;
for (i = 0; i < 256; i++) {
offsets[i] = cur_ofs;
cur_ofs += pHist[i];
}
for (i = 0; i < num_syms; i++)
pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] =
pCur_syms[i];
{
tdefl_sym_freq *t = pCur_syms;
pCur_syms = pNew_syms;
pNew_syms = t;
}
}
return pCur_syms;
}
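// Note: the sort above is an LSB-first counting sort over the two bytes of
// m_key. The hist[] check before the pass loop drops the high-byte pass
// entirely when every key fits in 8 bits (all num_syms entries land in the
// high-byte-zero bucket), which is the common case for small blocks.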
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
int root, leaf, next, avbl, used, dpth;
if (n == 0)
return;
else if (n == 1) {
A[0].m_key = 1;
return;
}
A[0].m_key += A[1].m_key;
root = 0;
leaf = 2;
for (next = 1; next < n - 1; next++) {
if (leaf >= n || A[root].m_key < A[leaf].m_key) {
A[next].m_key = A[root].m_key;
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = A[leaf++].m_key;
if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
}
A[n - 2].m_key = 0;
for (next = n - 3; next >= 0; next--)
A[next].m_key = A[A[next].m_key].m_key + 1;
avbl = 1;
used = dpth = 0;
root = n - 2;
next = n - 1;
while (avbl > 0) {
while (root >= 0 && (int)A[root].m_key == dpth) {
used++;
root--;
}
while (avbl > used) {
A[next--].m_key = (mz_uint16)(dpth);
avbl--;
}
avbl = 2 * used;
dpth++;
used = 0;
}
}
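// The routine above is the in-place algorithm from Moffat & Katajainen's
// "In-Place Calculation of Minimum-Redundancy Codes": with frequencies sorted
// ascending in A[].m_key, the first loop builds the Huffman tree storing
// parent links back into A[], the second pass converts parent links into node
// depths, and the final while loop assigns leaf depths. On return, A[i].m_key
// holds the code length for the i-th (sorted) symbol.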
// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
int code_list_len,
int max_code_size) {
int i;
mz_uint32 total = 0;
if (code_list_len <= 1) return;
for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
pNum_codes[max_code_size] += pNum_codes[i];
for (i = max_code_size; i > 0; i--)
total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
while (total != (1UL << max_code_size)) {
pNum_codes[max_code_size]--;
for (i = max_code_size - 1; i > 0; i--)
if (pNum_codes[i]) {
pNum_codes[i]--;
pNum_codes[i + 1] += 2;
break;
}
total--;
}
}
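// Kraft-inequality repair: 'total' is sum(num_codes[i] << (max_code_size-i)).
// Each iteration removes one code at the deepest level (total drops by 1) and
// rebalances by turning one shallower code into two codes one level deeper (a
// net-zero change to the sum), looping until total == 1 << max_code_size,
// i.e. the clamped lengths once again describe a complete prefix code.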
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
int table_len, int code_size_limit,
int static_table) {
int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
MZ_CLEAR_OBJ(num_codes);
if (static_table) {
for (i = 0; i < table_len; i++)
num_codes[d->m_huff_code_sizes[table_num][i]]++;
} else {
tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
*pSyms;
int num_used_syms = 0;
const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
for (i = 0; i < table_len; i++)
if (pSym_count[i]) {
syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
}
pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
code_size_limit);
MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
for (l = num_codes[i]; l > 0; l--)
d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
}
next_code[1] = 0;
for (j = 0, i = 2; i <= code_size_limit; i++)
next_code[i] = j = ((j + num_codes[i - 1]) << 1);
for (i = 0; i < table_len; i++) {
mz_uint rev_code = 0, code, code_size;
if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
code = next_code[code_size]++;
for (l = code_size; l > 0; l--, code >>= 1)
rev_code = (rev_code << 1) | (code & 1);
d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
}
}
#define TDEFL_PUT_BITS(b, l) \
do { \
mz_uint bits = b; \
mz_uint len = l; \
MZ_ASSERT(bits <= ((1U << len) - 1U)); \
d->m_bit_buffer |= (bits << d->m_bits_in); \
d->m_bits_in += len; \
while (d->m_bits_in >= 8) { \
if (d->m_pOutput_buf < d->m_pOutput_buf_end) \
*d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
d->m_bit_buffer >>= 8; \
d->m_bits_in -= 8; \
} \
} \
MZ_MACRO_END
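// TDEFL_PUT_BITS packs bits LSB-first, matching DEFLATE's bit order (RFC 1951
// section 3.1.1): the first bit written lands in the least-significant bit of
// the first output byte. This is also why tdefl_optimize_huffman_table()
// stores bit-reversed codes: reversing once at table-build time lets every
// code be emitted here with plain shifts.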
#define TDEFL_RLE_PREV_CODE_SIZE() \
{ \
if (rle_repeat_count) { \
if (rle_repeat_count < 3) { \
d->m_huff_count[2][prev_code_size] = (mz_uint16)( \
d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
while (rle_repeat_count--) \
packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
} else { \
d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 16; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_repeat_count - 3); \
} \
rle_repeat_count = 0; \
} \
}
#define TDEFL_RLE_ZERO_CODE_SIZE() \
{ \
if (rle_z_count) { \
if (rle_z_count < 3) { \
d->m_huff_count[2][0] = \
(mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \
while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
} else if (rle_z_count <= 10) { \
d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 17; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 3); \
} else { \
d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 18; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 11); \
} \
rle_z_count = 0; \
} \
}
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
int num_lit_codes, num_dist_codes, num_bit_lengths;
mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
rle_repeat_count, packed_code_sizes_index;
mz_uint8
code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
prev_code_size = 0xFF;
d->m_huff_count[0][256] = 1;
tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
num_dist_codes);
total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
num_packed_code_sizes = 0;
rle_z_count = 0;
rle_repeat_count = 0;
memset(&d->m_huff_count[2][0], 0,
sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
for (i = 0; i < total_code_sizes_to_pack; i++) {
mz_uint8 code_size = code_sizes_to_pack[i];
if (!code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
if (++rle_z_count == 138) {
TDEFL_RLE_ZERO_CODE_SIZE();
}
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
if (code_size != prev_code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
d->m_huff_count[2][code_size] =
(mz_uint16)(d->m_huff_count[2][code_size] + 1);
packed_code_sizes[num_packed_code_sizes++] = code_size;
} else if (++rle_repeat_count == 6) {
TDEFL_RLE_PREV_CODE_SIZE();
}
}
prev_code_size = code_size;
}
if (rle_repeat_count) {
TDEFL_RLE_PREV_CODE_SIZE();
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
}
tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
TDEFL_PUT_BITS(2, 2);
TDEFL_PUT_BITS(num_lit_codes - 257, 5);
TDEFL_PUT_BITS(num_dist_codes - 1, 5);
for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
if (d->m_huff_code_sizes
[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
break;
num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
for (i = 0; (int)i < num_bit_lengths; i++)
TDEFL_PUT_BITS(
d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
for (packed_code_sizes_index = 0;
packed_code_sizes_index < num_packed_code_sizes;) {
mz_uint code = packed_code_sizes[packed_code_sizes_index++];
MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
if (code >= 16)
TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
"\02\03\07"[code - 16]);
}
}
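// Emits the header of a static (fixed-Huffman) block. The code lengths set up
// below are the fixed tables of RFC 1951 section 3.2.6 - literals 0-143 get 8
// bits, 144-255 get 9, length codes 256-279 get 7, 280-287 get 8, and all 32
// distance codes get 5 bits - so only the 2-bit block type needs writing.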
static void tdefl_start_static_block(tdefl_compressor *d) {
mz_uint i;
mz_uint8 *p = &d->m_huff_code_sizes[0][0];
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
memset(d->m_huff_code_sizes[1], 5, 32);
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
TDEFL_PUT_BITS(1, 2);
}
static const mz_uint mz_bitmasks[17] = {
0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
mz_uint8 *pOutput_buf = d->m_pOutput_buf;
mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
mz_uint64 bit_buffer = d->m_bit_buffer;
mz_uint bits_in = d->m_bits_in;
#define TDEFL_PUT_BITS_FAST(b, l) \
{ \
bit_buffer |= (((mz_uint64)(b)) << bits_in); \
bits_in += (l); \
}
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint s0, s1, n0, n1, sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
// This sequence coaxes MSVC into using cmov's vs. jmp's.
s0 = s_tdefl_small_dist_sym[match_dist & 511];
n0 = s_tdefl_small_dist_extra[match_dist & 511];
s1 = s_tdefl_large_dist_sym[match_dist >> 8];
n1 = s_tdefl_large_dist_extra[match_dist >> 8];
sym = (match_dist < 512) ? s0 : s1;
num_extra_bits = (match_dist < 512) ? n0 : n1;
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
}
}
}
if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
*(mz_uint64 *)pOutput_buf = bit_buffer;
pOutput_buf += (bits_in >> 3);
bit_buffer >>= (bits_in & ~7);
bits_in &= 7;
}
#undef TDEFL_PUT_BITS_FAST
d->m_pOutput_buf = pOutput_buf;
d->m_bits_in = 0;
d->m_bit_buffer = 0;
while (bits_in) {
mz_uint32 n = MZ_MIN(bits_in, 16);
TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
bit_buffer >>= n;
bits_in -= n;
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
if (match_dist < 512) {
sym = s_tdefl_small_dist_sym[match_dist];
num_extra_bits = s_tdefl_small_dist_extra[match_dist];
} else {
sym = s_tdefl_large_dist_sym[match_dist >> 8];
num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
}
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
}
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
if (static_block)
tdefl_start_static_block(d);
else
tdefl_start_dynamic_block(d);
return tdefl_compress_lz_codes(d);
}
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
mz_uint saved_bit_buf, saved_bits_in;
mz_uint8 *pSaved_output_buf;
mz_bool comp_block_succeeded = MZ_FALSE;
int n, use_raw_block =
((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
(d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
mz_uint8 *pOutput_buf_start =
((d->m_pPut_buf_func == NULL) &&
((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
: d->m_output_buf;
d->m_pOutput_buf = pOutput_buf_start;
d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
MZ_ASSERT(!d->m_output_flush_remaining);
d->m_output_flush_ofs = 0;
d->m_output_flush_remaining = 0;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
TDEFL_PUT_BITS(0x78, 8);
TDEFL_PUT_BITS(0x01, 8);
}
TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
pSaved_output_buf = d->m_pOutput_buf;
saved_bit_buf = d->m_bit_buffer;
saved_bits_in = d->m_bits_in;
if (!use_raw_block)
comp_block_succeeded =
tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
(d->m_total_lz_bytes < 48));
// If the block gets expanded, forget the current contents of the output
// buffer and send a raw block instead.
if (((use_raw_block) ||
((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
d->m_total_lz_bytes))) &&
((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
mz_uint i;
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
TDEFL_PUT_BITS(0, 2);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
}
for (i = 0; i < d->m_total_lz_bytes; ++i) {
TDEFL_PUT_BITS(
d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
8);
}
}
// Check for the extremely unlikely (if not impossible) case of the compressed
// block not fitting into the output buffer when using dynamic codes.
else if (!comp_block_succeeded) {
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
tdefl_compress_block(d, MZ_TRUE);
}
if (flush) {
if (flush == TDEFL_FINISH) {
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
mz_uint i, a = d->m_adler32;
for (i = 0; i < 4; i++) {
TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
a <<= 8;
}
}
} else {
mz_uint i, z = 0;
TDEFL_PUT_BITS(0, 3);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, z ^= 0xFFFF) {
TDEFL_PUT_BITS(z & 0xFFFF, 16);
}
}
}
MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
d->m_total_lz_bytes = 0;
d->m_block_index++;
if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
if (d->m_pPut_buf_func) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
} else if (pOutput_buf_start == d->m_output_buf) {
int bytes_to_copy = (int)MZ_MIN(
(size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
bytes_to_copy);
d->m_out_buf_ofs += bytes_to_copy;
if ((n -= bytes_to_copy) != 0) {
d->m_output_flush_ofs = bytes_to_copy;
d->m_output_flush_remaining = n;
}
} else {
d->m_out_buf_ofs += n;
}
}
return d->m_output_flush_remaining;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
s01 = TDEFL_READ_UNALIGNED_WORD(s);
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
q = (const mz_uint16 *)(d->m_dict + probe_pos);
if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
p = s;
probe_len = 32;
do {
} while (
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
if (!probe_len) {
*pMatch_dist = dist;
*pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
break;
} else if ((probe_len = ((mz_uint)(p - s) * 2) +
(mz_uint)(*(const mz_uint8 *)p ==
*(const mz_uint8 *)q)) > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
max_match_len)
break;
c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
}
}
}
#else
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint8 *s = d->m_dict + pos, *p, *q;
mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if ((d->m_dict[probe_pos + match_len] == c0) && \
(d->m_dict[probe_pos + match_len - 1] == c1)) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
p = s;
q = d->m_dict + probe_pos;
for (probe_len = 0; probe_len < max_match_len; probe_len++)
if (*p++ != *q++) break;
if (probe_len > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
c0 = d->m_dict[pos + match_len];
c1 = d->m_dict[pos + match_len - 1];
}
}
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
// Faster, minimally featured LZRW1-style match+parse loop with better
// register utilization. Intended for applications where raw throughput is
// valued more highly than ratio.
mz_uint lookahead_pos = d->m_lookahead_pos,
lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
total_lz_bytes = d->m_total_lz_bytes,
num_flags_left = d->m_num_flags_left;
mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
mz_uint dst_pos =
(lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
d->m_src_buf_left -= num_bytes_to_process;
lookahead_size += num_bytes_to_process;
while (num_bytes_to_process) {
mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
d->m_pSrc += n;
dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
num_bytes_to_process -= n;
}
dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
break;
while (lookahead_size >= 4) {
mz_uint cur_match_dist, cur_match_len = 1;
mz_uint8 *pCur_dict = d->m_dict + cur_pos;
mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
mz_uint hash =
(first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
TDEFL_LEVEL1_HASH_SIZE_MASK;
mz_uint probe_pos = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)lookahead_pos;
if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
dict_size) &&
((*(const mz_uint32 *)(d->m_dict +
(probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
0xFFFFFF) == first_trigram)) {
const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
mz_uint32 probe_len = 32;
do {
} while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
(mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
if (!probe_len)
cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U))) {
cur_match_len = 1;
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
} else {
mz_uint32 s0, s1;
cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 1) &&
(cur_match_dist <= TDEFL_LZ_DICT_SIZE));
cur_match_dist--;
pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
*(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
pLZ_code_buf += 3;
*pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
TDEFL_MIN_MATCH_LEN]]++;
}
} else {
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
}
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
total_lz_bytes += cur_match_len;
lookahead_pos += cur_match_len;
dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
MZ_ASSERT(lookahead_size >= cur_match_len);
lookahead_size -= cur_match_len;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
while (lookahead_size) {
mz_uint8 lit = d->m_dict[cur_pos];
total_lz_bytes++;
*pLZ_code_buf++ = lit;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
lookahead_pos++;
dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
lookahead_size--;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
}
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
mz_uint8 lit) {
d->m_total_lz_bytes++;
*d->m_pLZ_code_buf++ = lit;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
}
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
mz_uint match_len,
mz_uint match_dist) {
mz_uint32 s0, s1;
MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
(match_dist <= TDEFL_LZ_DICT_SIZE));
d->m_total_lz_bytes += match_len;
d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
match_dist -= 1;
d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
d->m_pLZ_code_buf += 3;
*d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
s0 = s_tdefl_small_dist_sym[match_dist & 511];
s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
if (match_len >= TDEFL_MIN_MATCH_LEN)
d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
const mz_uint8 *pSrc = d->m_pSrc;
size_t src_buf_left = d->m_src_buf_left;
tdefl_flush flush = d->m_flush;
while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
// Update dictionary and hash chains. Keeps the lookahead size equal to
// TDEFL_MAX_MATCH_LEN.
if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK,
ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
src_buf_left -= num_bytes_to_process;
d->m_lookahead_size += num_bytes_to_process;
while (pSrc != pSrc_end) {
mz_uint8 c = *pSrc++;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
ins_pos++;
}
} else {
while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
mz_uint8 c = *pSrc++;
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK;
src_buf_left--;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< (TDEFL_LZ_HASH_SHIFT * 2)) ^
(d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
c) &
(TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
}
}
}
d->m_dict_size =
MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
// Simple lazy/greedy parsing state machine.
len_to_move = 1;
cur_match_dist = 0;
cur_match_len =
d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
cur_match_len = 0;
while (cur_match_len < d->m_lookahead_size) {
if (d->m_dict[cur_pos + cur_match_len] != c) break;
cur_match_len++;
}
if (cur_match_len < TDEFL_MIN_MATCH_LEN)
cur_match_len = 0;
else
cur_match_dist = 1;
}
} else {
tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
d->m_lookahead_size, &cur_match_dist, &cur_match_len);
}
if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U)) ||
(cur_pos == cur_match_dist) ||
((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
cur_match_dist = cur_match_len = 0;
}
if (d->m_saved_match_len) {
if (cur_match_len > d->m_saved_match_len) {
tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
if (cur_match_len >= 128) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
d->m_saved_match_len = 0;
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[cur_pos];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
} else {
tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
len_to_move = d->m_saved_match_len - 1;
d->m_saved_match_len = 0;
}
} else if (!cur_match_dist)
tdefl_record_literal(d,
d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
(cur_match_len >= 128)) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
// Move the lookahead forward by len_to_move bytes.
d->m_lookahead_pos += len_to_move;
MZ_ASSERT(d->m_lookahead_size >= len_to_move);
d->m_lookahead_size -= len_to_move;
d->m_dict_size =
MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
// Check if it's time to flush the current LZ codes to the internal output
// buffer.
if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
((d->m_total_lz_bytes > 31 * 1024) &&
(((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
d->m_total_lz_bytes) ||
(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
int n;
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
}
}
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
return MZ_TRUE;
}
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
if (d->m_pIn_buf_size) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
}
if (d->m_pOut_buf_size) {
size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
d->m_output_flush_remaining);
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
d->m_output_buf + d->m_output_flush_ofs, n);
d->m_output_flush_ofs += (mz_uint)n;
d->m_output_flush_remaining -= (mz_uint)n;
d->m_out_buf_ofs += n;
*d->m_pOut_buf_size = d->m_out_buf_ofs;
}
return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE
: TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush) {
if (!d) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return TDEFL_STATUS_BAD_PARAM;
}
d->m_pIn_buf = pIn_buf;
d->m_pIn_buf_size = pIn_buf_size;
d->m_pOut_buf = pOut_buf;
d->m_pOut_buf_size = pOut_buf_size;
d->m_pSrc = (const mz_uint8 *)(pIn_buf);
d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
d->m_out_buf_ofs = 0;
d->m_flush = flush;
if (((d->m_pPut_buf_func != NULL) ==
((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
(d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
(d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
(pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
(pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
}
d->m_wants_to_finish |= (flush == TDEFL_FINISH);
if ((d->m_output_flush_remaining) || (d->m_finished))
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
TDEFL_RLE_MATCHES)) == 0)) {
if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
} else
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
{
if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
}
if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
(pIn_buf))
d->m_adler32 =
(mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
d->m_pSrc - (const mz_uint8 *)pIn_buf);
if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
(!d->m_output_flush_remaining)) {
if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
d->m_finished = (flush == TDEFL_FINISH);
if (flush == TDEFL_FULL_FLUSH) {
MZ_CLEAR_OBJ(d->m_hash);
MZ_CLEAR_OBJ(d->m_next);
d->m_dict_size = 0;
}
}
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush) {
MZ_ASSERT(d->m_pPut_buf_func);
return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
d->m_pPut_buf_func = pPut_buf_func;
d->m_pPut_buf_user = pPut_buf_user;
d->m_flags = (mz_uint)(flags);
d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_pOutput_buf = d->m_output_buf;
d->m_pOutput_buf_end = d->m_output_buf;
d->m_prev_return_status = TDEFL_STATUS_OKAY;
d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
d->m_adler32 = 1;
d->m_pIn_buf = NULL;
d->m_pOut_buf = NULL;
d->m_pIn_buf_size = NULL;
d->m_pOut_buf_size = NULL;
d->m_flush = TDEFL_NO_FLUSH;
d->m_pSrc = NULL;
d->m_src_buf_left = 0;
d->m_out_buf_ofs = 0;
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
return TDEFL_STATUS_OKAY;
}
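// A minimal usage sketch for the tdefl_init()/tdefl_compress_buffer() pair,
// streaming compressed output through a put-buf callback (the same pattern
// tdefl_compress_mem_to_output() uses below). example_write_cb and the FILE*
// sink are hypothetical and assume stdio is available; compiled out with
// #if 0.
#if 0
static mz_bool example_write_cb(const void *pBuf, int len, void *pUser) {
  return fwrite(pBuf, 1, (size_t)len, (FILE *)pUser) == (size_t)len;
}
static mz_bool example_deflate_to_file(const void *pData, size_t len,
                                       FILE *pFile) {
  tdefl_compressor comp;
  if (tdefl_init(&comp, example_write_cb, pFile,
                 TDEFL_DEFAULT_MAX_PROBES | TDEFL_WRITE_ZLIB_HEADER) !=
      TDEFL_STATUS_OKAY)
    return MZ_FALSE;
  return tdefl_compress_buffer(&comp, pData, len, TDEFL_FINISH) ==
         TDEFL_STATUS_DONE;
}
#endif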
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
return d->m_prev_return_status;
}
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
tdefl_compressor *pComp;
mz_bool succeeded;
if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
if (!pComp) return MZ_FALSE;
succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
TDEFL_STATUS_OKAY);
succeeded =
succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
TDEFL_STATUS_DONE);
MZ_FREE(pComp);
return succeeded;
}
typedef struct {
size_t m_size, m_capacity;
mz_uint8 *m_pBuf;
mz_bool m_expandable;
} tdefl_output_buffer;
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
void *pUser) {
tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
size_t new_size = p->m_size + len;
if (new_size > p->m_capacity) {
size_t new_capacity = p->m_capacity;
mz_uint8 *pNew_buf;
if (!p->m_expandable) return MZ_FALSE;
do {
new_capacity = MZ_MAX(128U, new_capacity << 1U);
} while (new_size > new_capacity);
pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
if (!pNew_buf) return MZ_FALSE;
p->m_pBuf = pNew_buf;
p->m_capacity = new_capacity;
}
memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
p->m_size = new_size;
return MZ_TRUE;
}
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_len)
return MZ_FALSE;
else
*pOut_len = 0;
out_buf.m_expandable = MZ_TRUE;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return NULL;
*pOut_len = out_buf.m_size;
return out_buf.m_pBuf;
}
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_buf) return 0;
out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
out_buf.m_capacity = out_buf_len;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return 0;
return out_buf.m_size;
}
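// Round-trip sketch pairing the two one-shot memory APIs: the
// TDEFL_WRITE_ZLIB_HEADER flag on the compress side matches
// TINFL_FLAG_PARSE_ZLIB_HEADER on the inflate side. Buffer sizes are
// hypothetical; compiled out with #if 0.
#if 0
static mz_bool example_round_trip(void) {
  const char src[] = "the quick brown fox jumps over the lazy dog";
  mz_uint8 comp[256], back[256];
  size_t comp_len = tdefl_compress_mem_to_mem(
      comp, sizeof(comp), src, sizeof(src),
      TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
  if (!comp_len) return MZ_FALSE;
  return (tinfl_decompress_mem_to_mem(back, sizeof(back), comp, comp_len,
                                      TINFL_FLAG_PARSE_ZLIB_HEADER) ==
          sizeof(src)) &&
         (memcmp(back, src, sizeof(src)) == 0);
}
#endif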
#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy) {
mz_uint comp_flags =
s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
if (!level)
comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
else if (strategy == MZ_FILTERED)
comp_flags |= TDEFL_FILTER_MATCHES;
else if (strategy == MZ_HUFFMAN_ONLY)
comp_flags &= ~TDEFL_MAX_PROBES_MASK;
else if (strategy == MZ_FIXED)
comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
else if (strategy == MZ_RLE)
comp_flags |= TDEFL_RLE_MATCHES;
return comp_flags;
}
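// For example, level 9 maps to 768 probes plus TDEFL_WRITE_ZLIB_HEADER, while
// level 1 maps to a single probe plus TDEFL_GREEDY_PARSING_FLAG (and the zlib
// header flag too, since window_bits > 0) - the same speed/ratio trade-offs
// zlib's levels express.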
#endif // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip) {
// Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
// defined.
static const mz_uint s_tdefl_png_num_probes[11] = {
0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
tdefl_compressor *pComp =
(tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
tdefl_output_buffer out_buf;
int i, bpl = w * num_chans, y, z;
mz_uint32 c;
*pLen_out = 0;
if (!pComp) return NULL;
MZ_CLEAR_OBJ(out_buf);
out_buf.m_expandable = MZ_TRUE;
out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
MZ_FREE(pComp);
return NULL;
}
// write dummy header
for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf);
// compress image data
tdefl_init(
pComp, tdefl_output_buffer_putter, &out_buf,
s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
for (y = 0; y < h; ++y) {
tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
tdefl_compress_buffer(pComp,
(mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
bpl, TDEFL_NO_FLUSH);
}
if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
TDEFL_STATUS_DONE) {
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
// write real header
*pLen_out = out_buf.m_size - 41;
{
static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
    mz_uint8 pnghdr[41] = {
        0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,  // PNG signature
        0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52,  // IHDR length + type
        0, 0, (mz_uint8)(w >> 8), (mz_uint8)w,    // width, big-endian
        0, 0, (mz_uint8)(h >> 8), (mz_uint8)h,    // height, big-endian
        8, chans[num_chans], 0, 0, 0,  // depth, color type, comp/filter/interlace
        0, 0, 0, 0,                    // IHDR CRC (patched in below)
        (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16),
        (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out,  // IDAT length
        0x49, 0x44, 0x41, 0x54};                          // "IDAT"
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
for (i = 0; i < 4; ++i, c <<= 8)
((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
memcpy(out_buf.m_pBuf, pnghdr, 41);
}
// write footer (IDAT CRC-32, followed by IEND chunk)
if (!tdefl_output_buffer_putter(
"\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
*pLen_out = 0;
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
*pLen_out + 4);
for (i = 0; i < 4; ++i, c <<= 8)
(out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
// compute final size of file, grab compressed data buffer and return
*pLen_out += 57;
MZ_FREE(pComp);
return out_buf.m_pBuf;
}
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out) {
  // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we
  // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs were
  // #defined out).
return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
pLen_out, 6, MZ_FALSE);
}
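// Sketch of saving the in-memory PNG to disk. The w*h*3 RGB pixel layout and
// the output file name are hypothetical, and stdio is assumed; compiled out
// with #if 0.
#if 0
static mz_bool example_save_png(const mz_uint8 *pRGB, int w, int h) {
  size_t png_len = 0;
  void *pPng =
      tdefl_write_image_to_png_file_in_memory(pRGB, w, h, 3, &png_len);
  mz_bool ok = MZ_FALSE;
  if (pPng) {
    FILE *pFile = fopen("out.png", "wb");
    if (pFile) {
      ok = fwrite(pPng, 1, png_len, pFile) == png_len;
      fclose(pFile);
    }
    mz_free(pPng);  // buffer comes from the miniz heap
  }
  return ok;
}
#endif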
// ------------------- .ZIP archive reading
#ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
FILE *pFile = NULL;
fopen_s(&pFile, pFilename, pMode);
return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
FILE *pFile = NULL;
if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
// ZIP archive identifiers and record sizes
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
// Central directory header record offsets
MZ_ZIP_CDH_SIG_OFS = 0,
MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
MZ_ZIP_CDH_METHOD_OFS = 10,
MZ_ZIP_CDH_FILE_TIME_OFS = 12,
MZ_ZIP_CDH_FILE_DATE_OFS = 14,
MZ_ZIP_CDH_CRC32_OFS = 16,
MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
MZ_ZIP_CDH_DISK_START_OFS = 34,
MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
// Local directory header offsets
MZ_ZIP_LDH_SIG_OFS = 0,
MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
MZ_ZIP_LDH_METHOD_OFS = 8,
MZ_ZIP_LDH_FILE_TIME_OFS = 10,
MZ_ZIP_LDH_FILE_DATE_OFS = 12,
MZ_ZIP_LDH_CRC32_OFS = 14,
MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
// End of central directory offsets
MZ_ZIP_ECDH_SIG_OFS = 0,
MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
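// Simple growable array: m_p holds m_size elements of m_element_size bytes
// each (m_capacity allocated). Used below for the raw central directory image
// and the per-entry offset and sort-index tables.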
typedef struct {
void *m_p;
size_t m_size, m_capacity;
mz_uint m_element_size;
} mz_zip_array;
struct mz_zip_internal_state_tag {
mz_zip_array m_central_dir;
mz_zip_array m_central_dir_offsets;
mz_zip_array m_sorted_central_dir_offsets;
MZ_FILE *m_pFile;
void *m_pMem;
size_t m_mem_size;
size_t m_mem_capacity;
};
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
(array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
((element_type *)((array_ptr)->m_p))[index]
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
mz_zip_array *pArray) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
memset(pArray, 0, sizeof(mz_zip_array));
}
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t min_new_capacity,
mz_uint growing) {
void *pNew_p;
size_t new_capacity = min_new_capacity;
MZ_ASSERT(pArray->m_element_size);
if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
if (growing) {
new_capacity = MZ_MAX(1, pArray->m_capacity);
while (new_capacity < min_new_capacity) new_capacity *= 2;
}
if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
pArray->m_element_size, new_capacity)))
return MZ_FALSE;
pArray->m_p = pNew_p;
pArray->m_capacity = new_capacity;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_capacity,
mz_uint growing) {
if (new_capacity > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
return MZ_FALSE;
}
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_size,
mz_uint growing) {
if (new_size > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
return MZ_FALSE;
}
pArray->m_size = new_size;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t n) {
return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
mz_zip_array *pArray,
const void *pElements,
size_t n) {
size_t orig_size = pArray->m_size;
if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
return MZ_FALSE;
memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
pElements, n * pArray->m_element_size);
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
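// MS-DOS timestamps pack the date as (year - 1980) << 9 | month << 5 | day
// and the time as hour << 11 | minute << 5 | (seconds / 2), i.e. 2-second
// resolution with a 1980 epoch; the two helpers below convert to/from time_t.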
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
struct tm tm;
memset(&tm, 0, sizeof(tm));
tm.tm_isdst = -1;
tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
tm.tm_mon = ((dos_date >> 5) & 15) - 1;
tm.tm_mday = dos_date & 31;
tm.tm_hour = (dos_time >> 11) & 31;
tm.tm_min = (dos_time >> 5) & 63;
tm.tm_sec = (dos_time << 1) & 62;
return mktime(&tm);
}
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
struct tm tm_struct;
struct tm *tm = &tm_struct;
errno_t err = localtime_s(tm, &time);
if (err) {
*pDOS_date = 0;
*pDOS_time = 0;
return;
}
#else
struct tm *tm = localtime(&time);
#endif
*pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
((tm->tm_sec) >> 1));
*pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif
#ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
(void)pFilename;
*pDOS_date = *pDOS_time = 0;
#else
struct MZ_FILE_STAT_STRUCT file_stat;
// On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
// bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
time_t modified_time) {
struct utimbuf t;
t.actime = access_time;
t.modtime = modified_time;
return !utime(pFilename, &t);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
mz_uint32 flags) {
(void)flags;
if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_READING;
pZip->m_archive_size = 0;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets,
mz_uint l_index, mz_uint r_index) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (l_len < r_len) : (l < r);
}
#define MZ_SWAP_UINT32(a, b) \
do { \
mz_uint32 t = a; \
a = b; \
b = t; \
} \
MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
mz_zip_archive *pZip) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
int start = (size - 2) >> 1, end;
while (start >= 0) {
int child, root = start;
for (;;) {
if ((child = (root << 1) + 1) >= size) break;
child +=
(((child + 1) < size) &&
(mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1])));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
start--;
}
end = size - 1;
while (end > 0) {
int child, root = 0;
MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
for (;;) {
if ((child = (root << 1) + 1) >= end) break;
child +=
(((child + 1) < end) &&
mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1]));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
end--;
}
}
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
mz_uint32 flags) {
mz_uint cdir_size, num_this_disk, cdir_disk_index;
mz_uint64 cdir_ofs;
mz_int64 cur_file_ofs;
const mz_uint8 *p;
mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
mz_bool sort_central_dir =
((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
  // Basic sanity check - reject archives which are too small to possibly hold
  // an end of central directory record.
if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
// Find the end of central directory record by scanning the file from the end
// towards the beginning.
cur_file_ofs =
MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
for (;;) {
int i,
n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
return MZ_FALSE;
for (i = n - 4; i >= 0; --i)
if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
if (i >= 0) {
cur_file_ofs += i;
break;
}
if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
(0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
return MZ_FALSE;
cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
}
// Read and verify the end of central directory record.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
((pZip->m_total_files =
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
return MZ_FALSE;
num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
if (((num_this_disk | cdir_disk_index) != 0) &&
((num_this_disk != 1) || (cdir_disk_index != 1)))
return MZ_FALSE;
if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;
pZip->m_central_directory_file_ofs = cdir_ofs;
if (pZip->m_total_files) {
mz_uint i, n;
// Read the entire central directory into a heap block, and allocate another
// heap block to hold the unsorted central dir file record offsets, and
// another to hold the sorted indices.
if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
MZ_FALSE)) ||
(!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
pZip->m_total_files, MZ_FALSE)))
return MZ_FALSE;
if (sort_central_dir) {
if (!mz_zip_array_resize(pZip,
&pZip->m_pState->m_sorted_central_dir_offsets,
pZip->m_total_files, MZ_FALSE))
return MZ_FALSE;
}
if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
pZip->m_pState->m_central_dir.m_p,
cdir_size) != cdir_size)
return MZ_FALSE;
// Now create an index into the central directory file records, do some
// basic sanity checking on each record, and check for zip64 entries (which
// are not yet supported).
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
mz_uint total_header_size, comp_size, decomp_size, disk_index;
if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
(MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
return MZ_FALSE;
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
i) =
(mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
if (sort_central_dir)
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
mz_uint32, i) = i;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
(decomp_size != comp_size)) ||
(decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
(comp_size == 0xFFFFFFFF))
return MZ_FALSE;
disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
n)
return MZ_FALSE;
n -= total_header_size;
p += total_header_size;
}
}
if (sort_central_dir)
mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
return MZ_TRUE;
}
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags) {
if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE;
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
size_t s = (file_ofs >= pZip->m_archive_size)
? 0
: (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
return s;
}
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags) {
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
pZip->m_pRead = mz_zip_mem_read_func;
pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
pZip->m_pState->m_pMem = (void *)pMem;
#endif
pZip->m_pState->m_mem_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags) {
mz_uint64 file_size;
MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
if (!pFile) return MZ_FALSE;
if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
file_size = MZ_FTELL64(pFile);
if (!mz_zip_reader_init_internal(pZip, flags)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
pZip->m_pRead = mz_zip_file_read_func;
pZip->m_pIO_opaque = pZip;
pZip->m_pState->m_pFile = pFile;
pZip->m_archive_size = file_size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
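// Usage sketch (illustrative only, kept out of the build with #if 0): open an
// archive from disk and list every entry; "archive.zip" is a placeholder name.
#if 0
static void example_list_zip(void) {
  mz_zip_archive zip;
  mz_uint i;
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_reader_init_file(&zip, "archive.zip", 0)) return;
  for (i = 0; i < mz_zip_reader_get_num_files(&zip); i++) {
    mz_zip_archive_file_stat st;
    if (mz_zip_reader_file_stat(&zip, i, &st))
      printf("%s (%u bytes)\n", st.m_filename, (mz_uint)st.m_uncomp_size);
  }
  mz_zip_reader_end(&zip);
}
#endif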
#endif // #ifndef MINIZ_NO_STDIO
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
return pZip ? pZip->m_total_files : 0;
}
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
mz_zip_archive *pZip, mz_uint file_index) {
if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return NULL;
return &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
}
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint m_bit_flag;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
return (m_bit_flag & 1);
}
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint filename_len, external_attr;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
// First see if the filename ends with a '/' character.
filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_len) {
if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
return MZ_TRUE;
}
// Bugfix: This code was also checking if the internal attribute was non-zero,
// which wasn't correct.
// Most/all zip writers (hopefully) set DOS file/directory attributes in the
// low 16-bits, so check for the DOS directory flag and ignore the source OS
// ID in the created by field.
// FIXME: Remove this check? Is it necessary - we already check the filename.
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
if ((external_attr & 0x10) != 0) return MZ_TRUE;
return MZ_FALSE;
}
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if ((!p) || (!pStat)) return MZ_FALSE;
// Unpack the central directory record.
pStat->m_file_index = file_index;
pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
pStat->m_time =
mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
// Copy as much of the filename and comment as possible.
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pStat->m_filename[n] = '\0';
n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
pStat->m_comment_size = n;
memcpy(pStat->m_comment,
p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
n);
pStat->m_comment[n] = '\0';
return MZ_TRUE;
}
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) {
if (filename_buf_size) pFilename[0] = '\0';
return 0;
}
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_buf_size) {
n = MZ_MIN(n, filename_buf_size - 1);
memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pFilename[n] = '\0';
}
return n + 1;
}
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
const char *pB,
mz_uint len,
mz_uint flags) {
mz_uint i;
if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len);
for (i = 0; i < len; ++i)
if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE;
return MZ_TRUE;
}
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
mz_uint r_len) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
const mz_uint filename_len = (mz_uint)strlen(pFilename);
int l = 0, h = size - 1;
while (l <= h) {
int m = (l + h) >> 1, file_index = pIndices[m],
comp =
mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
file_index, pFilename, filename_len);
if (!comp)
return file_index;
else if (comp < 0)
l = m + 1;
else
h = m - 1;
}
return -1;
}
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags) {
mz_uint file_index;
size_t name_len, comment_len;
if ((!pZip) || (!pZip->m_pState) || (!pName) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return -1;
if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
(!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
return mz_zip_reader_locate_file_binary_search(pZip, pName);
name_len = strlen(pName);
if (name_len > 0xFFFF) return -1;
comment_len = pComment ? strlen(pComment) : 0;
if (comment_len > 0xFFFF) return -1;
for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
const char *pFilename =
(const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
if (filename_len < name_len) continue;
if (comment_len) {
mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
file_comment_len =
MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
const char *pFile_comment = pFilename + filename_len + file_extra_len;
if ((file_comment_len != comment_len) ||
(!mz_zip_reader_string_equal(pComment, pFile_comment,
file_comment_len, flags)))
continue;
}
if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
int ofs = filename_len - 1;
do {
if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
(pFilename[ofs] == ':'))
break;
} while (--ofs >= 0);
ofs++;
pFilename += ofs;
filename_len -= ofs;
}
if ((filename_len == name_len) &&
(mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
return file_index;
}
return -1;
}
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size) {
int status = TINFL_STATUS_DONE;
mz_uint64 needed_size, cur_file_ofs, comp_remaining,
out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
mz_zip_archive_file_stat file_stat;
void *pRead_buf;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
tinfl_decompressor inflator;
if ((buf_size) && (!pBuf)) return MZ_FALSE;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Ensure supplied output buffer is large enough.
needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
: file_stat.m_uncomp_size;
if (buf_size < needed_size) return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
(size_t)needed_size) != needed_size)
return MZ_FALSE;
return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
}
// Decompress the file either directly from memory or from a file input
// buffer.
tinfl_init(&inflator);
if (pZip->m_pState->m_pMem) {
// Read directly from the archive in memory.
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else if (pUser_read_buf) {
// Use a user provided read buffer.
if (!user_read_buf_size) return MZ_FALSE;
pRead_buf = (mz_uint8 *)pUser_read_buf;
read_buf_size = user_read_buf_size;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
} else {
// Temporarily allocate a read buffer.
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
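    // Note: the (0, expr) comma idiom under MSVC suppresses its "conditional
    // expression is constant" warning without changing the test's value.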
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(read_buf_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
return MZ_FALSE;
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
do {
size_t in_buf_size,
out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
(comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
out_buf_ofs += out_buf_size;
} while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
if (status == TINFL_STATUS_DONE) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, pUser_read_buf,
user_read_buf_size);
}
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags) {
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, NULL, 0);
}
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags) {
return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
buf_size, flags, NULL, 0);
}
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags) {
mz_uint64 comp_size, uncomp_size, alloc_size;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
void *pBuf;
if (pSize) *pSize = 0;
if (!p) return NULL;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
return NULL;
if (NULL ==
(pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
return NULL;
if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
flags)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return NULL;
}
if (pSize) *pSize = (size_t)alloc_size;
return pBuf;
}
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) {
if (pSize) *pSize = 0;
return MZ_FALSE;
}
return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
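// Usage sketch (illustrative only, kept out of the build with #if 0): extract
// one entry from an already-initialized reader into a heap buffer; "data.txt"
// is a placeholder name. With the default allocators the buffer comes from
// malloc(), so mz_free() releases it.
#if 0
static void example_extract(mz_zip_archive *pZip) {
  size_t size = 0;
  void *p = mz_zip_reader_extract_file_to_heap(pZip, "data.txt", &size, 0);
  if (p) {
    // ... consume size bytes at p ...
    mz_free(p);
  }
}
#endif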
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int status = TINFL_STATUS_DONE;
mz_uint file_crc32 = MZ_CRC32_INIT;
mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
out_buf_ofs = 0, cur_file_ofs;
mz_zip_archive_file_stat file_stat;
void *pRead_buf = NULL;
void *pWrite_buf = NULL;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
// Decompress the file either directly from memory or from a file input
// buffer.
if (pZip->m_pState->m_pMem) {
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else {
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pState->m_pMem) {
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#endif
return MZ_FALSE;
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
status = TINFL_STATUS_FAILED;
else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
(size_t)file_stat.m_comp_size);
cur_file_ofs += file_stat.m_comp_size;
out_buf_ofs += file_stat.m_comp_size;
comp_remaining = 0;
} else {
while (comp_remaining) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 = (mz_uint32)mz_crc32(
file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
out_buf_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
}
}
} else {
tinfl_decompressor inflator;
tinfl_init(&inflator);
if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
TINFL_LZ_DICT_SIZE)))
status = TINFL_STATUS_FAILED;
else {
do {
mz_uint8 *pWrite_buf_cur =
(mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
size_t in_buf_size,
out_buf_size =
TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
if (out_buf_size) {
if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
out_buf_size) {
status = TINFL_STATUS_FAILED;
break;
}
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
status = TINFL_STATUS_FAILED;
break;
}
}
} while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
(status == TINFL_STATUS_HAS_MORE_OUTPUT));
}
}
if ((status == TINFL_STATUS_DONE) &&
(!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(file_crc32 != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque,
flags);
}
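// Usage sketch (illustrative only, kept out of the build with #if 0): stream a
// decompressed entry through a callback without buffering the whole file; this
// callback just tallies bytes. Returning fewer than n bytes signals failure.
#if 0
static size_t example_count_bytes(void *pOpaque, mz_uint64 ofs,
                                  const void *pBuf, size_t n) {
  (void)ofs;
  (void)pBuf;
  *(mz_uint64 *)pOpaque += n;
  return n;
}
static mz_uint64 example_entry_size(mz_zip_archive *pZip, mz_uint file_index) {
  mz_uint64 total = 0;
  if (!mz_zip_reader_extract_to_callback(pZip, file_index, example_count_bytes,
                                         &total, 0))
    return 0;
  return total;
}
#endif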
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
const void *pBuf, size_t n) {
(void)ofs;
return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename,
mz_uint flags) {
mz_bool status;
mz_zip_archive_file_stat file_stat;
MZ_FILE *pFile;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
pFile = MZ_FOPEN(pDst_filename, "wb");
if (!pFile) return MZ_FALSE;
status = mz_zip_reader_extract_to_callback(
pZip, file_index, mz_zip_file_write_callback, pFile, flags);
if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
if (status)
mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
return status;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
if (pZip->m_pState) {
mz_zip_internal_state *pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
}
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags) {
int file_index =
mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}
#endif
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
}
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
p[2] = (mz_uint8)(v >> 16);
p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (pZip->m_file_offset_alignment) {
// Ensure user specified file offset alignment is a power of 2.
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
return MZ_FALSE;
}
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_archive_size = existing_size;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
if ((!n) ||
((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
if ((!n) ||
((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
return 0;
if (new_size > pState->m_mem_capacity) {
void *pNew_block;
size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
while (new_capacity < new_size) new_capacity *= 2;
if (NULL == (pNew_block = pZip->m_pRealloc(
pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
return 0;
pState->m_pMem = pNew_block;
pState->m_mem_capacity = new_capacity;
}
memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
pState->m_mem_size = (size_t)new_size;
return n;
}
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size) {
pZip->m_pWrite = mz_zip_heap_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
size_to_reserve_at_beginning))) {
if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_mem_capacity = initial_allocation_size;
}
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning) {
MZ_FILE *pFile;
pZip->m_pWrite = mz_zip_file_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_pFile = pFile;
if (size_to_reserve_at_beginning) {
mz_uint64 cur_ofs = 0;
char buf[4096];
MZ_CLEAR_OBJ(buf);
do {
size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
cur_ofs += n;
size_to_reserve_at_beginning -= n;
} while (size_to_reserve_at_beginning);
}
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the supported
  // max size.
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
pState = pZip->m_pState;
if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    (void)pFilename;  // touch the parameter to avoid unused warnings
return MZ_FALSE;
#else
// Archive is being read from stdio - try to reopen as writable.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
if (!pFilename) return MZ_FALSE;
pZip->m_pWrite = mz_zip_file_write_func;
if (NULL ==
(pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
// The mz_zip_archive is now in a bogus state because pState->m_pFile is
// NULL, so just close it.
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
#endif // #ifdef MINIZ_NO_STDIO
} else if (pState->m_pMem) {
// Archive lives in a memory block. Assume it's from the heap that we can
// resize using the realloc callback.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
pState->m_mem_capacity = pState->m_mem_size;
pZip->m_pWrite = mz_zip_heap_write_func;
}
// Archive is being read via a user provided read function - make sure the
// user has specified a write function too.
else if (!pZip->m_pWrite)
return MZ_FALSE;
// Start writing new files at the archive's current central directory
// location.
pZip->m_archive_size = pZip->m_central_directory_file_ofs;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_central_directory_file_ofs = 0;
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags) {
return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
level_and_flags, 0, 0);
}
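// Usage sketch (illustrative only, kept out of the build with #if 0): build an
// archive entirely in memory. Assumes mz_zip_writer_finalize_heap_archive()
// (defined later in this file) hands back the finished blob, which the default
// allocators let us release with mz_free().
#if 0
static void example_zip_in_memory(void) {
  mz_zip_archive zip;
  void *pBuf = NULL;
  size_t size = 0;
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_writer_init_heap(&zip, 0, 64 * 1024)) return;
  mz_zip_writer_add_mem(&zip, "hello.txt", "hello", 5, MZ_BEST_COMPRESSION);
  if (mz_zip_writer_finalize_heap_archive(&zip, &pBuf, &size)) {
    // ... size bytes at pBuf now hold a complete .zip image ...
  }
  mz_zip_writer_end(&zip);
  mz_free(pBuf);
}
#endif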
typedef struct {
mz_zip_archive *m_pZip;
mz_uint64 m_cur_archive_file_ofs;
mz_uint64 m_comp_size;
} mz_zip_writer_add_state;
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
void *pUser) {
mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
pState->m_cur_archive_file_ofs, pBuf,
len) != len)
return MZ_FALSE;
pState->m_cur_archive_file_ofs += len;
pState->m_comp_size += len;
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_local_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_central_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_add_to_central_dir(
mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
const void *pExtra, mz_uint16 extra_size, const void *pComment,
mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
mz_uint32 ext_attributes) {
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
size_t orig_central_dir_size = pState->m_central_dir.m_size;
mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
// No zip64 support yet
if ((local_header_ofs > 0xFFFFFFFF) ||
(((mz_uint64)pState->m_central_dir.m_size +
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
comment_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_central_dir_header(
pZip, central_dir_header, filename_size, extra_size, comment_size,
uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
dos_date, local_header_ofs, ext_attributes))
return MZ_FALSE;
if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
filename_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
extra_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
comment_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
// Try to push the central directory array back into its original state.
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
return MZ_TRUE;
}
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
// Basic ZIP archive filename validity checks: Valid filenames cannot start
// with a forward slash, cannot contain a drive letter, and cannot use
// DOS-style backward slashes.
if (*pArchive_name == '/') return MZ_FALSE;
while (*pArchive_name) {
if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE;
pArchive_name++;
}
return MZ_TRUE;
}
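// Example: with m_file_offset_alignment == 64 and m_archive_size == 100,
// n == (100 & 63) == 36, so (64 - 36) & 63 == 28 padding bytes land the next
// local header at offset 128.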
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
mz_zip_archive *pZip) {
mz_uint32 n;
if (!pZip->m_file_offset_alignment) return 0;
n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
return (pZip->m_file_offset_alignment - n) &
(pZip->m_file_offset_alignment - 1);
}
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
mz_uint64 cur_file_ofs, mz_uint32 n) {
char buf[4096];
memset(buf, 0, MZ_MIN(sizeof(buf), n));
while (n) {
mz_uint32 s = MZ_MIN(sizeof(buf), n);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
return MZ_FALSE;
cur_file_ofs += s;
n -= s;
}
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32) {
mz_uint16 method = 0, dos_time = 0, dos_date = 0;
mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
tdefl_compressor *pComp = NULL;
mz_bool store_data_uncompressed;
mz_zip_internal_state *pState;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
store_data_uncompressed =
((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
(!pArchive_name) || ((comment_size) && (!pComment)) ||
(pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
pState = pZip->m_pState;
if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
return MZ_FALSE;
// No zip64 support yet
if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
{
time_t cur_time;
time(&cur_time);
mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
}
#endif // #ifndef MINIZ_NO_TIME
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
// Set DOS Subdirectory attribute bit.
ext_attributes |= 0x10;
// Subdirectories cannot contain data.
if ((buf_size) || (uncomp_size)) return MZ_FALSE;
}
// Try to do any allocations before writing to the archive, so if an
// allocation fails the file remains unmodified. (A good idea if we're doing
// an in-place modification.)
if ((!mz_zip_array_ensure_room(
pZip, &pState->m_central_dir,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
(!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
return MZ_FALSE;
if ((!store_data_uncompressed) && (buf_size)) {
if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
return MZ_FALSE;
}
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
uncomp_crc32 =
(mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
uncomp_size = buf_size;
if (uncomp_size <= 3) {
level = 0;
store_data_uncompressed = MZ_TRUE;
}
}
if (store_data_uncompressed) {
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
buf_size) != buf_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += buf_size;
comp_size = buf_size;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED;
} else if (buf_size) {
mz_zip_writer_add_state state;
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) ||
(tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
TDEFL_STATUS_DONE)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pComp = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
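// Illustrative usage sketch for the writer above, disabled with #if 0 so it
// never affects the build. It follows the usual miniz workflow
// (init_heap -> add_mem_ex -> finalize_heap_archive -> end); the archive
// entry name and payload are placeholders.
#if 0
static mz_bool example_build_heap_archive(void **ppBuf, size_t *pSize) {
  mz_zip_archive zip;
  MZ_CLEAR_OBJ(zip);
  if (!mz_zip_writer_init_heap(&zip, 0, 0)) return MZ_FALSE;
  const char *payload = "hello, zip";
  // uncomp_size/uncomp_crc32 are 0: MZ_ZIP_FLAG_COMPRESSED_DATA is not set,
  // so the writer computes the CRC and sizes itself.
  if (!mz_zip_writer_add_mem_ex(&zip, "hello.txt", payload, strlen(payload),
                                NULL, 0, MZ_DEFAULT_LEVEL, 0, 0)) {
    mz_zip_writer_end(&zip);
    return MZ_FALSE;
  }
  // On success, ownership of the heap buffer moves to the caller.
  if (!mz_zip_writer_finalize_heap_archive(&zip, ppBuf, pSize)) {
    mz_zip_writer_end(&zip);
    return MZ_FALSE;
  }
  return mz_zip_writer_end(&zip);
}
#endif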
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
MZ_FILE *pSrc_file = NULL;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
return MZ_FALSE;
pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
if (!pSrc_file) return MZ_FALSE;
MZ_FSEEK64(pSrc_file, 0, SEEK_END);
uncomp_size = MZ_FTELL64(pSrc_file);
MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
if (uncomp_size > 0xFFFFFFFF) {
// No zip64 support yet
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (uncomp_size <= 3) level = 0;
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (uncomp_size) {
mz_uint64 uncomp_remaining = uncomp_size;
void *pRead_buf =
pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
if (!pRead_buf) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (!level) {
while (uncomp_remaining) {
mz_uint n =
(mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
(pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
n) != n)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
uncomp_crc32 =
(mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
uncomp_remaining -= n;
cur_archive_file_ofs += n;
}
comp_size = uncomp_size;
} else {
mz_bool result = MZ_FALSE;
mz_zip_writer_add_state state;
tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
if (!pComp) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
for (;;) {
size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
tdefl_status status;
if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
break;
uncomp_crc32 = (mz_uint32)mz_crc32(
uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
uncomp_remaining -= in_buf_size;
status = tdefl_compress_buffer(
pComp, pRead_buf, in_buf_size,
uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
if (status == TDEFL_STATUS_DONE) {
result = MZ_TRUE;
break;
} else if (status != TDEFL_STATUS_OKAY)
break;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
if (!result) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
}
MZ_FCLOSE(pSrc_file);
pSrc_file = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index) {
mz_uint n, bit_flags, num_alignment_padding_bytes;
mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
size_t orig_central_dir_size;
mz_zip_internal_state *pState;
void *pBuf;
const mz_uint8 *pSrc_central_header;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
if (NULL ==
(pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
return MZ_FALSE;
pState = pZip->m_pState;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
0xFFFFFFFF))
return MZ_FALSE;
cur_src_file_ofs =
MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
cur_dst_file_ofs = pZip->m_archive_size;
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
num_alignment_padding_bytes))
return MZ_FALSE;
cur_dst_file_ofs += num_alignment_padding_bytes;
local_dir_header_ofs = cur_dst_file_ofs;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
comp_bytes_remaining =
n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
if (NULL == (pBuf = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1,
(size_t)MZ_MAX(sizeof(mz_uint32) * 4,
MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
comp_bytes_remaining)))))
return MZ_FALSE;
while (comp_bytes_remaining) {
n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_dst_file_ofs += n;
comp_bytes_remaining -= n;
}
bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
if (bit_flags & 8) {
// Copy data descriptor
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
cur_dst_file_ofs += n;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
// no zip64 support yet
if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE;
orig_central_dir_size = pState->m_central_dir.m_size;
memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
local_dir_header_ofs);
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
return MZ_FALSE;
n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
if (!mz_zip_array_push_back(
pZip, &pState->m_central_dir,
pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE;
n = (mz_uint32)orig_central_dir_size;
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
pZip->m_total_files++;
pZip->m_archive_size = cur_dst_file_ofs;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_uint64 central_dir_ofs, central_dir_size;
mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
pState = pZip->m_pState;
// no zip64 support yet
if ((pZip->m_total_files > 0xFFFF) ||
((pZip->m_archive_size + pState->m_central_dir.m_size +
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
central_dir_ofs = 0;
central_dir_size = 0;
if (pZip->m_total_files) {
// Write central directory
central_dir_ofs = pZip->m_archive_size;
central_dir_size = pState->m_central_dir.m_size;
pZip->m_central_directory_file_ofs = central_dir_ofs;
if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
pState->m_central_dir.m_p,
(size_t)central_dir_size) != central_dir_size)
return MZ_FALSE;
pZip->m_archive_size += central_dir_size;
}
// Write end of central directory record
MZ_CLEAR_OBJ(hdr);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
pZip->m_total_files);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
sizeof(hdr)) != sizeof(hdr))
return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_archive_size += sizeof(hdr);
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize) {
if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE;
if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE;
if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE;
*pBuf = pZip->m_pState->m_pMem;
*pSize = pZip->m_pState->m_mem_size;
pZip->m_pState->m_pMem = NULL;
pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0;
return MZ_TRUE;
}
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_bool status = MZ_TRUE;
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
return MZ_FALSE;
pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
pState->m_pMem = NULL;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return status;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_bool status, created_new_archive = MZ_FALSE;
mz_zip_archive zip_archive;
struct MZ_FILE_STAT_STRUCT file_stat;
MZ_CLEAR_OBJ(zip_archive);
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
((comment_size) && (!pComment)) ||
((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
// Create a new archive.
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
return MZ_FALSE;
created_new_archive = MZ_TRUE;
} else {
// Append to an existing archive.
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return MZ_FALSE;
if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
mz_zip_reader_end(&zip_archive);
return MZ_FALSE;
}
}
status =
mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
pComment, comment_size, level_and_flags, 0, 0);
// Always finalize, even if adding failed for some reason, so we have a valid
// central directory. (This may not always succeed, but we can try.)
if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
if ((!status) && (created_new_archive)) {
// It's a new archive and something went wrong, so just delete it.
int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
(void)ignoredStatus;
}
return status;
}
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint flags) {
int file_index;
mz_zip_archive zip_archive;
void *p = NULL;
if (pSize) *pSize = 0;
if ((!pZip_filename) || (!pArchive_name)) return NULL;
MZ_CLEAR_OBJ(zip_archive);
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return NULL;
if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL,
flags)) >= 0)
p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
mz_zip_reader_end(&zip_archive);
return p;
}
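// Illustrative round trip for the two convenience helpers above, disabled
// with #if 0. File and entry names are placeholders; with the default
// allocators the heap block returned by mz_zip_extract_archive_file_to_heap
// comes from malloc, so free() releases it.
#if 0
static void example_in_place_roundtrip(void) {
  const char msg[] = "hello";
  if (mz_zip_add_mem_to_archive_file_in_place("test.zip", "greeting.txt", msg,
                                              sizeof(msg) - 1, NULL, 0,
                                              MZ_DEFAULT_LEVEL)) {
    size_t size = 0;
    void *p = mz_zip_extract_archive_file_to_heap("test.zip", "greeting.txt",
                                                  &size, 0);
    if (p) {
      // p now holds 'size' bytes of the original payload.
      free(p);
    }
  }
}
#endif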
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
} // namespace miniz
#else
// Reuse the MINIZ_LITTLE_ENDIAN macro
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif // TINYEXR_USE_MINIZ
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
static void SetErrorMessage(const std::string &msg, const char **err) {
if (err) {
#ifdef _WIN32
(*err) = _strdup(msg.c_str());
#else
(*err) = strdup(msg.c_str());
#endif
}
}
static const int kEXRVersionSize = 8;
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
}
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned short tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[1];
dst[1] = src[0];
#endif
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
static void cpy4(int *dst_val, const int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(float *dst_val, const float *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
dst[5] = src[5];
dst[6] = src[6];
dst[7] = src[7];
}
#endif
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
tinyexr::tinyexr_uint64 tmp = (*val);
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[7];
dst[1] = src[6];
dst[2] = src[5];
dst[3] = src[4];
dst[4] = src[3];
dst[5] = src[2];
dst[6] = src[1];
dst[7] = src[0];
#endif
}
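// Sanity sketch for the swap helpers above (disabled with #if 0): on
// little-endian hosts they are no-ops; on big-endian hosts they reverse the
// stored bytes so that values read from the little-endian EXR byte stream
// come out correct.
#if 0
static void example_swap4(void) {
  unsigned int v = 0x01020304u;
  swap4(&v);  // little-endian build: unchanged; big-endian: 0x04030201
}
#endif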
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
union FP32 {
unsigned int u;
float f;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 23;
unsigned int Exponent : 8;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 8;
unsigned int Mantissa : 23;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
union FP16 {
unsigned short u;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 10;
unsigned int Exponent : 5;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 5;
unsigned int Mantissa : 10;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
static FP32 half_to_float(FP16 h) {
static const FP32 magic = {113 << 23};
static const unsigned int shifted_exp = 0x7c00
<< 13; // exponent mask after shift
FP32 o;
o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits
unsigned int exp_ = shifted_exp & o.u; // just the exponent
o.u += (127 - 15) << 23; // exponent adjust
// handle exponent special cases
if (exp_ == shifted_exp) // Inf/NaN?
o.u += (128 - 16) << 23; // extra exp adjust
else if (exp_ == 0) // Zero/Denormal?
{
o.u += 1 << 23; // extra exp adjust
o.f -= magic.f; // renormalize
}
o.u |= (h.u & 0x8000U) << 16U; // sign bit
return o;
}
static FP16 float_to_half_full(FP32 f) {
FP16 o = {0};
// Based on ISPC reference code (with minor modifications)
if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow)
o.s.Exponent = 0;
else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set)
{
o.s.Exponent = 31;
o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf
} else // Normalized number
{
// Exponent unbias the single, then bias the halfp
int newexp = f.s.Exponent - 127 + 15;
if (newexp >= 31) // Overflow, return signed infinity
o.s.Exponent = 31;
else if (newexp <= 0) // Underflow
{
if ((14 - newexp) <= 24) // Mantissa might be non-zero
{
unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit
o.s.Mantissa = mant >> (14 - newexp);
if ((mant >> (13 - newexp)) & 1) // Check for rounding
o.u++; // Round, might overflow into exp bit, but this is OK
}
} else {
o.s.Exponent = static_cast<unsigned int>(newexp);
o.s.Mantissa = f.s.Mantissa >> 13;
if (f.s.Mantissa & 0x1000) // Check for rounding
o.u++; // Round, might overflow to inf, this is OK
}
}
o.s.Sign = f.s.Sign;
return o;
}
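// Worked example for the two converters above: the half value 0x3C00
// encodes 1.0. half_to_float() computes o.u = (0x3C00 & 0x7fff) << 13 =
// 0x07800000, then the (127 - 15) << 23 bias adjust adds 0x38000000 giving
// 0x3F800000, which is exactly 1.0f in IEEE-754 single precision.
// float_to_half_full(1.0f) reverses it: exponent 127 rebiases to 15 and the
// mantissa stays 0, so o.u == 0x3C00 again.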
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NULL ('\0').
const char *p = ptr;
const char *q = ptr;
while ((size_t(q - ptr) < len) && (*q) != 0) {
q++;
}
if (size_t(q - ptr) >= len) {
(*s) = std::string();
return NULL;
}
(*s) = std::string(p, q);
return q + 1; // skip '\0'
}
static bool ReadAttribute(std::string *name, std::string *type,
std::vector<unsigned char> *data, size_t *marker_size,
const char *marker, size_t size) {
size_t name_len = strnlen(marker, size);
if (name_len == size) {
// String does not have a terminating character.
return false;
}
*name = std::string(marker, name_len);
marker += name_len + 1;
size -= name_len + 1;
size_t type_len = strnlen(marker, size);
if (type_len == size) {
return false;
}
*type = std::string(marker, type_len);
marker += type_len + 1;
size -= type_len + 1;
if (size < sizeof(uint32_t)) {
return false;
}
uint32_t data_len;
memcpy(&data_len, marker, sizeof(uint32_t));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len == 0) {
if ((*type).compare("string") == 0) {
// Accept empty string attribute.
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
data->resize(1);
(*data)[0] = '\0';
return true;
} else {
return false;
}
}
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
if (size < data_len) {
return false;
}
data->resize(static_cast<size_t>(data_len));
memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
return true;
}
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
const char *name, const char *type,
const unsigned char *data, int len) {
out->insert(out->end(), name, name + strlen(name) + 1);
out->insert(out->end(), type, type + strlen(type) + 1);
int outLen = len;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen));
out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
out->insert(out->end(), data, data + len);
}
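// Layout sketch for the two attribute helpers above. An EXR attribute is
// serialized as
//
//   name '\0' type '\0' uint32 data_len (little endian) data[data_len]
//
// e.g. the standard compression attribute is the bytes
// "compression\0compression\0" 01 00 00 00 followed by one value byte.
// ReadAttribute() recovers the (name, type, data) triple and rejects input
// whose strings are unterminated or whose data_len overruns `size`.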
typedef struct {
std::string name; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} ChannelInfo;
typedef struct {
std::vector<tinyexr::ChannelInfo> channels;
std::vector<EXRAttribute> attributes;
int data_window[4];
int line_order;
int display_window[4];
float screen_window_center[2];
float screen_window_width;
float pixel_aspect_ratio;
int chunk_count;
// Tiled format
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
unsigned int header_len;
int compression_type;
void clear() {
channels.clear();
attributes.clear();
data_window[0] = 0;
data_window[1] = 0;
data_window[2] = 0;
data_window[3] = 0;
line_order = 0;
display_window[0] = 0;
display_window[1] = 0;
display_window[2] = 0;
display_window[3] = 0;
screen_window_center[0] = 0.0f;
screen_window_center[1] = 0.0f;
screen_window_width = 0.0f;
pixel_aspect_ratio = 0.0f;
chunk_count = 0;
// Tiled format
tile_size_x = 0;
tile_size_y = 0;
tile_level_mode = 0;
tile_rounding_mode = 0;
header_len = 0;
compression_type = 0;
}
} HeaderInfo;
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
const std::vector<unsigned char> &data) {
const char *p = reinterpret_cast<const char *>(&data.at(0));
for (;;) {
if ((*p) == 0) {
break;
}
ChannelInfo info;
tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
(p - reinterpret_cast<const char *>(data.data()));
if (data_len < 0) {
return false;
}
p = ReadString(&info.name, p, size_t(data_len));
if ((p == NULL) && (info.name.empty())) {
// Buffer overrun. Issue #51.
return false;
}
const unsigned char *data_end =
reinterpret_cast<const unsigned char *>(p) + 16;
if (data_end >= (data.data() + data.size())) {
return false;
}
memcpy(&info.pixel_type, p, sizeof(int));
p += 4;
info.p_linear = static_cast<unsigned char>(p[0]); // uchar
p += 1 + 3; // reserved: uchar[3]
memcpy(&info.x_sampling, p, sizeof(int)); // int
p += 4;
memcpy(&info.y_sampling, p, sizeof(int)); // int
p += 4;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling));
channels.push_back(info);
}
return true;
}
static void WriteChannelInfo(std::vector<unsigned char> &data,
const std::vector<ChannelInfo> &channels) {
size_t sz = 0;
// Calculate total size.
for (size_t c = 0; c < channels.size(); c++) {
sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0
sz += 16; // 4 * int
}
data.resize(sz + 1);
unsigned char *p = &data.at(0);
for (size_t c = 0; c < channels.size(); c++) {
memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
p += strlen(channels[c].name.c_str());
(*p) = '\0';
p++;
int pixel_type = channels[c].pixel_type;
int x_sampling = channels[c].x_sampling;
int y_sampling = channels[c].y_sampling;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));
memcpy(p, &pixel_type, sizeof(int));
p += sizeof(int);
(*p) = channels[c].p_linear;
p += 4;
memcpy(p, &x_sampling, sizeof(int));
p += sizeof(int);
memcpy(p, &y_sampling, sizeof(int));
p += sizeof(int);
}
(*p) = '\0';
}
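// Layout sketch for the channel-list helpers above. Each entry occupies
//
//   name '\0' | int pixel_type | uchar p_linear | uchar reserved[3]
//             | int x_sampling | int y_sampling
//
// (ints little endian), and the whole list ends with a single '\0' byte --
// which is why WriteChannelInfo resizes to sz + 1 and why ReadChannelInfo
// stops when it sees a 0 where a name would start.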
static void CompressZip(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
  // Apply the EXR-specific preprocessing (byte reordering followed by a
  // delta predictor). Adapted from OpenEXR's ImfZipCompressor.cpp.
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
#if TINYEXR_USE_MINIZ
//
// Compress the data using miniz
//
miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
int ret = miniz::mz_compress(
dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
src_size);
assert(ret == miniz::MZ_OK);
(void)ret;
compressedSize = outSize;
#else
uLong outSize = compressBound(static_cast<uLong>(src_size));
int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
src_size);
assert(ret == Z_OK);
compressedSize = outSize;
#endif
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
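// Worked example of the reorder + predictor steps above on a 3-byte
// scanline {10, 12, 11}:
//   reorder   : t1 takes bytes 0 and 2, t2 takes byte 1 -> {10, 11, 12}
//   predictor : each later byte becomes (delta + 384) truncated to 8 bits:
//               {10, 11 - 10 + 384, 12 - 11 + 384} -> {10, 129, 129}
// DecompressZip below inverts this with d = t[-1] + t[0] - 128 and the
// mirror-image interleave, recovering {10, 12, 11} exactly.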
static bool DecompressZip(unsigned char *dst,
unsigned long *uncompressed_size /* inout */,
const unsigned char *src, unsigned long src_size) {
if ((*uncompressed_size) == src_size) {
    // Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return true;
}
std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
int ret =
miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (miniz::MZ_OK != ret) {
return false;
}
#else
int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (Z_OK != ret) {
return false;
}
#endif
//
  // Apply the EXR-specific postprocessing (undo the delta predictor, then
  // restore the byte order). Adapted from OpenEXR's ImfZipCompressor.cpp.
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(*uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + (*uncompressed_size);
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
static int rleCompress(int inLength, const char in[], signed char out[]) {
const char *inEnd = in + inLength;
const char *runStart = in;
const char *runEnd = in + 1;
signed char *outWrite = out;
while (runStart < inEnd) {
while (runEnd < inEnd && *runStart == *runEnd &&
runEnd - runStart - 1 < MAX_RUN_LENGTH) {
++runEnd;
}
if (runEnd - runStart >= MIN_RUN_LENGTH) {
//
      // Compressible run
//
      *outWrite++ = static_cast<signed char>(runEnd - runStart - 1);
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
runStart = runEnd;
} else {
//
      // Uncompressible run
//
while (runEnd < inEnd &&
((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
(runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
runEnd - runStart < MAX_RUN_LENGTH) {
++runEnd;
}
*outWrite++ = static_cast<char>(runStart - runEnd);
while (runStart < runEnd) {
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
}
}
++runEnd;
}
return static_cast<int>(outWrite - out);
}
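// Worked example: rleCompress() on the 6 input bytes "AAAABC" emits the
// 5 bytes {3, 'A', -2, 'B', 'C'}. A non-negative count n means "repeat the
// next byte n + 1 times" (the run of four 'A's); a negative count -n means
// "copy the next n bytes verbatim" ('B' then 'C'). rleUncompress() below
// reverses this and returns 6, the uncompressed length.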
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
static int rleUncompress(int inLength, int maxLength, const signed char in[],
char out[]) {
char *outStart = out;
while (inLength > 0) {
if (*in < 0) {
int count = -(static_cast<int>(*in++));
inLength -= count + 1;
// Fixes #116: Add bounds check to in buffer.
if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;
memcpy(out, in, count);
out += count;
in += count;
} else {
int count = *in++;
inLength -= 2;
if (0 > (maxLength -= count + 1)) return 0;
memset(out, *reinterpret_cast<const char *>(in), count + 1);
out += count + 1;
in++;
}
}
return static_cast<int>(out - outStart);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
static void CompressRle(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
  // Apply the EXR-specific preprocessing (byte reordering followed by a
  // delta predictor). Adapted from OpenEXR's ImfRleCompressor.cpp.
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
  // outSize will be at most (src_size * 3) / 2.
int outSize = rleCompress(static_cast<int>(src_size),
reinterpret_cast<const char *>(&tmpBuf.at(0)),
reinterpret_cast<signed char *>(dst));
assert(outSize > 0);
compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static bool DecompressRle(unsigned char *dst,
const unsigned long uncompressed_size,
const unsigned char *src, unsigned long src_size) {
if (uncompressed_size == src_size) {
    // Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return true;
}
// Workaround for issue #112.
// TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
if (src_size <= 2) {
return false;
}
std::vector<unsigned char> tmpBuf(uncompressed_size);
int ret = rleUncompress(static_cast<int>(src_size),
static_cast<int>(uncompressed_size),
reinterpret_cast<const signed char *>(src),
reinterpret_cast<char *>(&tmpBuf.at(0)));
if (ret != static_cast<int>(uncompressed_size)) {
return false;
}
//
  // Apply the EXR-specific postprocessing (undo the delta predictor, then
  // restore the byte order). Adapted from OpenEXR's ImfRleCompressor.cpp.
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + uncompressed_size;
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
struct PIZChannelData {
unsigned short *start;
unsigned short *end;
int nx;
int ny;
int ys;
int size;
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
short as = static_cast<short>(a);
short bs = static_cast<short>(b);
short ms = (as + bs) >> 1;
short ds = as - bs;
l = static_cast<unsigned short>(ms);
h = static_cast<unsigned short>(ds);
}
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
short ls = static_cast<short>(l);
short hs = static_cast<short>(h);
int hi = hs;
int ai = ls + (hi & 1) + (hi >> 1);
short as = static_cast<short>(ai);
short bs = static_cast<short>(ai - hi);
a = static_cast<unsigned short>(as);
b = static_cast<unsigned short>(bs);
}
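// Worked example: wenc14(10, 4) produces the average/difference pair
// l = (10 + 4) >> 1 = 7, h = 10 - 4 = 6, and wdec14(7, 6) reconstructs
// a = 7 + (6 & 1) + (6 >> 1) = 10 and b = 10 - 6 = 4 -- an exact integer
// round trip, which is what makes the transform lossless.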
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
int ao = (a + A_OFFSET) & MOD_MASK;
int m = ((ao + b) >> 1);
int d = ao - b;
if (d < 0) m = (m + M_OFFSET) & MOD_MASK;
d &= MOD_MASK;
l = static_cast<unsigned short>(m);
h = static_cast<unsigned short>(d);
}
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
int m = l;
int d = h;
int bb = (m - (d >> 1)) & MOD_MASK;
int aa = (d + bb - A_OFFSET) & MOD_MASK;
b = static_cast<unsigned short>(bb);
a = static_cast<unsigned short>(aa);
}
//
// 2D Wavelet encoding:
//
static void wav2Encode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1; // == 1 << level
int p2 = 2; // == 1 << (level+1)
//
  // Hierarchical loop on smaller dimension n
//
while (p2 <= n) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet encoding
//
if (w14) {
wenc14(*px, *p01, i00, i01);
wenc14(*p10, *p11, i10, i11);
wenc14(i00, i10, *px, *p10);
wenc14(i01, i11, *p01, *p11);
} else {
wenc16(*px, *p01, i00, i01);
wenc16(*p10, *p11, i10, i11);
wenc16(i00, i10, *px, *p10);
wenc16(i01, i11, *p01, *p11);
}
}
//
// Encode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wenc14(*px, *p10, i00, *p10);
else
wenc16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Encode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wenc14(*px, *p01, i00, *p01);
else
wenc16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p = p2;
p2 <<= 1;
}
}
//
// 2D Wavelet decoding:
//
static void wav2Decode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1;
int p2;
//
// Search max level
//
while (p <= n) p <<= 1;
p >>= 1;
p2 = p;
p >>= 1;
//
// Hierarchical loop on smaller dimension n
//
while (p >= 1) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet decoding
//
if (w14) {
wdec14(*px, *p10, i00, i10);
wdec14(*p01, *p11, i01, i11);
wdec14(i00, i01, *px, *p01);
wdec14(i10, i11, *p10, *p11);
} else {
wdec16(*px, *p10, i00, i10);
wdec16(*p01, *p11, i01, i11);
wdec16(i00, i01, *px, *p01);
wdec16(i10, i11, *p10, *p11);
}
}
//
// Decode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wdec14(*px, *p10, i00, *p10);
else
wdec16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Decode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wdec14(*px, *p01, i00, *p01);
else
wdec16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p2 = p;
p >>= 1;
}
}
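// Illustrative round trip for the wavelet pair above, disabled with #if 0.
// For a contiguous nx x ny image the x offset is 1 and the y offset is the
// row stride nx; mx is the largest sample value and selects between the
// 14-bit and the modulo-16-bit basis.
#if 0
static void example_wav2_roundtrip(void) {
  unsigned short img[4 * 4] = {1, 2,  3,  4,  5,  6,  7,  8,
                               9, 10, 11, 12, 13, 14, 15, 16};
  wav2Encode(img, /* nx */ 4, /* ox */ 1, /* ny */ 4, /* oy */ 4,
             /* mx */ 16);
  wav2Decode(img, 4, 1, 4, 4, 16);
  // img now holds 1..16 again.
}
#endif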
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16; // literal (value) bit length
const int HUF_DECBITS = 14; // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;
struct HufDec {  //  short code    long code
                 //  ---------------------------
  int len : 8;   //  code length   0
  int lit : 24;  //  lit           p size
  int *p;        //  0             lits
};
inline long long hufLength(long long code) { return code & 63; }
inline long long hufCode(long long code) { return code >> 6; }
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
char *&out) {
c <<= nBits;
lc += nBits;
c |= bits;
while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
while (lc < nBits) {
c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
lc += 8;
}
lc -= nBits;
return (c >> lc) & ((1 << nBits) - 1);
}
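// Worked example for the bit I/O pair above: outputBits() accumulates bits
// MSB-first in c and flushes whole bytes once lc reaches 8. Writing 6 bits
// of 42 (101010) then 6 bits of 5 (000101) gives c = 101010000101 (lc = 12);
// one byte 10101000 = 0xA8 is flushed and 0101 (lc = 4) stays pending.
// getBits() consumes the stream in the same order, so, once the final
// partial byte is flushed, two getBits(6, ...) calls return 42 and then 5.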
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
long long n[59];
//
// For each i from 0 through 58, count the
// number of different codes of length i, and
// store the count in n[i].
//
for (int i = 0; i <= 58; ++i) n[i] = 0;
for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
//
// For each i from 58 through 1, compute the
// numerically lowest code with length i, and
// store that code in n[i].
//
long long c = 0;
for (int i = 58; i > 0; --i) {
long long nc = ((c + n[i]) >> 1);
n[i] = c;
c = nc;
}
//
// hcode[i] contains the length, l, of the
// code for symbol i. Assign the next available
// code of length l to the symbol and store both
// l and the code in hcode[i].
//
for (int i = 0; i < HUF_ENCSIZE; ++i) {
int l = static_cast<int>(hcode[i]);
if (l > 0) hcode[i] = l | (n[l]++ << 6);
}
}
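// Worked example: suppose only three symbols have nonzero lengths,
// hcode = {2, 2, 1}. The counting pass gives n[1] = 1 and n[2] = 2; the
// downward pass assigns the starting codes n[2] = 0 and n[1] = 1, so the
// final codes are symbol 0 -> 00, symbol 1 -> 01, symbol 2 -> 1. Both
// canonical properties hold: '1' right-padded with zeroes (10) exceeds
// every 2-bit code, and equal-length codes increase with the symbol value.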
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
struct FHeapCompare {
bool operator()(long long *a, long long *b) { return *a > *b; }
};
static void hufBuildEncTable(
long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
int *im, // o: min frq index
int *iM) // o: max frq index
{
//
// This function assumes that when it is called, array frq
// indicates the frequency of all possible symbols in the data
// that are to be Huffman-encoded. (frq[i] contains the number
// of occurrences of symbol i in the data.)
//
// The loop below does three things:
//
// 1) Finds the minimum and maximum indices that point
// to non-zero entries in frq:
//
// frq[im] != 0, and frq[i] == 0 for all i < im
// frq[iM] != 0, and frq[i] == 0 for all i > iM
//
// 2) Fills array fHeap with pointers to all non-zero
// entries in frq.
//
// 3) Initializes array hlink such that hlink[i] == i
// for all array entries.
//
std::vector<int> hlink(HUF_ENCSIZE);
std::vector<long long *> fHeap(HUF_ENCSIZE);
*im = 0;
while (!frq[*im]) (*im)++;
int nf = 0;
for (int i = *im; i < HUF_ENCSIZE; i++) {
hlink[i] = i;
if (frq[i]) {
fHeap[nf] = &frq[i];
nf++;
*iM = i;
}
}
//
// Add a pseudo-symbol, with a frequency count of 1, to frq;
// adjust the fHeap and hlink array accordingly. Function
// hufEncode() uses the pseudo-symbol for run-length encoding.
//
(*iM)++;
frq[*iM] = 1;
fHeap[nf] = &frq[*iM];
nf++;
//
// Build an array, scode, such that scode[i] contains the number
// of bits assigned to symbol i. Conceptually this is done by
// constructing a tree whose leaves are the symbols with non-zero
// frequency:
//
// Make a heap that contains all symbols with a non-zero frequency,
// with the least frequent symbol on top.
//
// Repeat until only one symbol is left on the heap:
//
// Take the two least frequent symbols off the top of the heap.
// Create a new node that has first two nodes as children, and
// whose frequency is the sum of the frequencies of the first
// two nodes. Put the new node back into the heap.
//
// The last node left on the heap is the root of the tree. For each
// leaf node, the distance between the root and the leaf is the length
// of the code for the corresponding symbol.
//
// The loop below doesn't actually build the tree; instead we compute
// the distances of the leaves from the root on the fly. When a new
// node is added to the heap, then that node's descendants are linked
// into a single linear list that starts at the new node, and the code
// lengths of the descendants (that is, their distance from the root
// of the tree) are incremented by one.
//
std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
std::vector<long long> scode(HUF_ENCSIZE);
memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);
while (nf > 1) {
//
// Find the indices, mm and m, of the two smallest non-zero frq
// values in fHeap, add the smallest frq to the second-smallest
// frq, and remove the smallest frq value from fHeap.
//
int mm = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
--nf;
int m = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
frq[m] += frq[mm];
std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
//
// The entries in scode are linked into lists with the
// entries in hlink serving as "next" pointers and with
// the end of a list marked by hlink[j] == j.
//
// Traverse the lists that start at scode[m] and scode[mm].
// For each element visited, increment the length of the
// corresponding code by one bit. (If we visit scode[j]
// during the traversal, then the code for symbol j becomes
// one bit longer.)
//
// Merge the lists that start at scode[m] and scode[mm]
// into a single list that starts at scode[m].
//
//
// Add a bit to all codes in the first list.
//
for (int j = m;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) {
//
// Merge the two lists.
//
hlink[j] = mm;
break;
}
}
//
// Add a bit to all codes in the second list
//
for (int j = mm;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) break;
}
}
//
// Build a canonical Huffman code table, replacing the code
// lengths in scode with (code, code length) pairs. Copy the
// code table from scode into frq.
//
hufCanonicalCodeTable(scode.data());
memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
static void hufPackEncTable(
const long long *hcode, // i : encoding table [HUF_ENCSIZE]
int im, // i : min hcode index
int iM, // i : max hcode index
char **pcode) // o: ptr to packed table (updated)
{
char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
int l = hufLength(hcode[im]);
if (l == 0) {
int zerun = 1;
while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
if (hufLength(hcode[im + 1]) > 0) break;
im++;
zerun++;
}
if (zerun >= 2) {
if (zerun >= SHORTEST_LONG_RUN) {
outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
} else {
outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
}
continue;
}
}
outputBits(6, l, c, lc, p);
}
if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));
*pcode = p;
}
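// Worked example, matching the table above: a run of 3 zero lengths packs
// as the single 6-bit code 60; a run of 10 zero lengths takes the long
// form, the 6-bit code 63 followed by 10 - 6 = 4 in 8 bits
// (SHORTEST_LONG_RUN is 6). A lone zero is simply the 6-bit length 0.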
//
// Unpack an encoding table packed by hufPackEncTable():
//
static bool hufUnpackEncTable(
const char **pcode, // io: ptr to packed table (updated)
int ni, // i : input size (in bytes)
int im, // i : min hcode index
int iM, // i : max hcode index
long long *hcode) // o: encoding table [HUF_ENCSIZE]
{
memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
const char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
if (p - *pcode >= ni) {
return false;
}
long long l = hcode[im] = getBits(6, c, lc, p); // code length
if (l == (long long)LONG_ZEROCODE_RUN) {
if (p - *pcode > ni) {
return false;
}
int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
} else if (l >= (long long)SHORT_ZEROCODE_RUN) {
int zerun = l - SHORT_ZEROCODE_RUN + 2;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
}
}
*pcode = const_cast<char *>(p);
hufCanonicalCodeTable(hcode);
return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
for (int i = 0; i < HUF_DECSIZE; i++) {
hdecod[i].len = 0;
hdecod[i].lit = 0;
hdecod[i].p = NULL;
}
// memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
//   infrequent;
// - decoding tables are used by hufDecode();
//
static bool hufBuildDecTable(const long long *hcode, // i : encoding table
int im, // i : min index in hcode
int iM, // i : max index in hcode
HufDec *hdecod) // o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
//
// Init hashtable & loop on all codes.
// Assumes that hufClearDecTable(hdecod) has already been called.
//
for (; im <= iM; im++) {
long long c = hufCode(hcode[im]);
int l = hufLength(hcode[im]);
if (c >> l) {
//
// Error: c is supposed to be an l-bit code,
// but c contains a value that is greater
// than the largest l-bit number.
//
// invalidTableEntry();
return false;
}
if (l > HUF_DECBITS) {
//
// Long code: add a secondary entry
//
HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
if (pl->len) {
//
// Error: a short code has already
// been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->lit++;
if (pl->p) {
int *p = pl->p;
pl->p = new int[pl->lit];
for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
delete[] p;
} else {
pl->p = new int[1];
}
pl->p[pl->lit - 1] = im;
} else if (l) {
//
// Short code: init all primary entries
//
HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
if (pl->len || pl->p) {
//
// Error: a short code or a long code has
// already been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->len = l;
pl->lit = im;
}
}
}
return true;
}
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
for (int i = 0; i < HUF_DECSIZE; i++) {
if (hdecod[i].p) {
delete[] hdecod[i].p;
hdecod[i].p = 0;
}
}
}
//
// ENCODING
//
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
long long &c, int &lc, char *&out) {
//
  // Output a run of runCount instances of the symbol sCode.
// Output the symbols explicitly, or if that is shorter, output
// the sCode symbol once followed by a runCode symbol and runCount
// expressed as an 8-bit number.
//
if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
outputCode(sCode, c, lc, out);
outputCode(runCode, c, lc, out);
outputBits(8, runCount, c, lc, out);
} else {
while (runCount-- >= 0) outputCode(sCode, c, lc, out);
}
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
static int hufEncode // return: output size (in bits)
(const long long *hcode, // i : encoding table
const unsigned short *in, // i : uncompressed input buffer
 const int ni,             // i : number of input values (not bytes)
int rlc, // i : rl code
char *out) // o: compressed output buffer
{
char *outStart = out;
long long c = 0; // bits not yet written to out
int lc = 0; // number of valid bits in c (LSB)
int s = in[0];
int cs = 0;
//
// Loop on input values
//
for (int i = 1; i < ni; i++) {
//
// Count same values or send code
//
if (s == in[i] && cs < 255) {
cs++;
} else {
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
cs = 0;
}
s = in[i];
}
//
// Send remaining code
//
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
if (lc) *out = (c << (8 - lc)) & 0xff;
return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
#define getChar(c, lc, in) \
{ \
c = (c << 8) | *(unsigned char *)(in++); \
lc += 8; \
}
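//
// Bit-buffer invariant used by the decoder: the low `lc` bits of `c` hold
// input bits that have been read but not yet consumed; getChar() shifts one
// more byte in at the bottom and increases `lc` by 8.
//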
#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
/* TinyEXR issue 78 */ \
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
#else
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
const char *in_end, unsigned short *&out,
const unsigned short *ob, const unsigned short *oe) {
if (po == rlc) {
if (lc < 8) {
/* TinyEXR issue 78 */
if ((in + 1) >= in_end) {
return false;
}
getChar(c, lc, in);
}
lc -= 8;
unsigned char cs = (c >> lc);
if (out + cs > oe) return false;
// Bounds check for safety
// Issue 100.
if ((out - 1) < ob) return false;
unsigned short s = out[-1];
while (cs-- > 0) *out++ = s;
} else if (out < oe) {
*out++ = po;
} else {
return false;
}
return true;
}
#endif
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
static bool hufDecode(const long long *hcode, // i : encoding table
const HufDec *hdecod, // i : decoding table
const char *in, // i : compressed input buffer
int ni, // i : input size (in bits)
int rlc, // i : run-length code
int no, // i : expected output size (in bytes)
unsigned short *out) // o: uncompressed output buffer
{
long long c = 0;
int lc = 0;
unsigned short *outb = out; // begin
unsigned short *oe = out + no; // end
const char *ie = in + (ni + 7) / 8; // input byte size
//
// Loop on input bytes
//
while (in < ie) {
getChar(c, lc, in);
//
// Access decoding table
//
while (lc >= HUF_DECBITS) {
const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
if (pl.len) {
//
// Get short code
//
lc -= pl.len;
// std::cout << "lit = " << pl.lit << std::endl;
// std::cout << "rlc = " << rlc << std::endl;
// std::cout << "c = " << c << std::endl;
// std::cout << "lc = " << lc << std::endl;
// std::cout << "in = " << in << std::endl;
// std::cout << "out = " << out << std::endl;
// std::cout << "oe = " << oe << std::endl;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
if (!pl.p) {
return false;
}
// invalidCode(); // wrong code
//
// Search long code
//
int j;
for (j = 0; j < pl.lit; j++) {
int l = hufLength(hcode[pl.p[j]]);
while (lc < l && in < ie) // get more bits
getChar(c, lc, in);
if (lc >= l) {
if (hufCode(hcode[pl.p[j]]) ==
((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
//
// Found : get long code
//
lc -= l;
if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
break;
}
}
}
if (j == pl.lit) {
return false;
// invalidCode(); // Not found
}
}
}
}
//
// Get remaining (short) codes
//
  int i = (8 - ni) & 7;  // number of zero padding bits in the last input byte
  c >>= i;               // discard the padding bits
  lc -= i;
while (lc > 0) {
const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
if (pl.len) {
lc -= pl.len;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
return false;
// invalidCode(); // wrong (long) code
}
}
if (out - outb != no) {
return false;
}
// notEnoughData ();
return true;
}
static void countFrequencies(std::vector<long long> &freq,
const unsigned short data[/*n*/], int n) {
for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;
for (int i = 0; i < n; ++i) ++freq[data[i]];
}
static void writeUInt(char buf[4], unsigned int i) {
unsigned char *b = (unsigned char *)buf;
b[0] = i;
b[1] = i >> 8;
b[2] = i >> 16;
b[3] = i >> 24;
}
static unsigned int readUInt(const char buf[4]) {
const unsigned char *b = (const unsigned char *)buf;
return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
}
//
// EXTERNAL INTERFACE
//
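//
// Layout of the buffer produced by hufCompress() (integers little-endian):
//
//   bytes  0- 3  im          (minimum nonzero hcode index)
//   bytes  4- 7  iM          (maximum nonzero hcode index)
//   bytes  8-11  tableLength (packed encoding table size, in bytes)
//   bytes 12-15  nBits       (compressed data size, in bits)
//   bytes 16-19  reserved    (zero; room for future extensions)
//   bytes 20-    packed encoding table, then the compressed bit stream
//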
static int hufCompress(const unsigned short raw[], int nRaw,
char compressed[]) {
if (nRaw == 0) return 0;
std::vector<long long> freq(HUF_ENCSIZE);
countFrequencies(freq, raw, nRaw);
int im = 0;
int iM = 0;
hufBuildEncTable(freq.data(), &im, &iM);
char *tableStart = compressed + 20;
char *tableEnd = tableStart;
hufPackEncTable(freq.data(), im, iM, &tableEnd);
int tableLength = tableEnd - tableStart;
char *dataStart = tableEnd;
int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
int data_length = (nBits + 7) / 8;
writeUInt(compressed, im);
writeUInt(compressed + 4, iM);
writeUInt(compressed + 8, tableLength);
writeUInt(compressed + 12, nBits);
writeUInt(compressed + 16, 0); // room for future extensions
return dataStart + data_length - compressed;
}
static bool hufUncompress(const char compressed[], int nCompressed,
std::vector<unsigned short> *raw) {
  if (nCompressed == 0) {
    // No compressed data: succeed only when no output was expected.
    return raw->size() == 0;
  }
int im = readUInt(compressed);
int iM = readUInt(compressed + 4);
// int tableLength = readUInt (compressed + 8);
int nBits = readUInt(compressed + 12);
if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
const char *ptr = compressed + 20;
//
  // The fast decoder needs at least 2x64 bits of compressed data and
  // must be runnable on this platform. Otherwise, fall back
  // to the original decoder.
//
// if (FastHufDecoder::enabled() && nBits > 128)
//{
// FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
// fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
//}
// else
{
std::vector<long long> freq(HUF_ENCSIZE);
std::vector<HufDec> hdec(HUF_DECSIZE);
hufClearDecTable(&hdec.at(0));
    if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                           &freq.at(0))) {
      return false;
    }
    {
      if (nBits > 8 * (nCompressed - (ptr - compressed))) {
        return false;
      }
      if (!hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0))) {
        hufFreeDecTable(&hdec.at(0));
        return false;
      }
      if (!hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(),
                     raw->data())) {
        hufFreeDecTable(&hdec.at(0));
        return false;
      }
    }
// catch (...)
//{
// hufFreeDecTable (hdec);
// throw;
//}
hufFreeDecTable(&hdec.at(0));
}
return true;
}
//
// Functions to compress the range of values in the pixel data
//
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
unsigned char bitmap[BITMAP_SIZE],
unsigned short &minNonZero,
unsigned short &maxNonZero) {
for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;
for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));
bitmap[0] &= ~1; // zero is not explicitly stored in
// the bitmap; we assume that the
// data always contain zeroes
minNonZero = BITMAP_SIZE - 1;
maxNonZero = 0;
for (int i = 0; i < BITMAP_SIZE; ++i) {
if (bitmap[i]) {
if (minNonZero > i) minNonZero = i;
if (maxNonZero < i) maxNonZero = i;
}
}
}
static unsigned short forwardLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
lut[i] = k++;
else
lut[i] = 0;
}
return k - 1; // maximum value stored in lut[],
} // i.e. number of ones in bitmap minus 1
static unsigned short reverseLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
}
int n = k - 1;
while (k < USHORT_RANGE) lut[k++] = 0;
return n; // maximum k where lut[k] is non-zero,
} // i.e. number of ones in bitmap minus 1
static void applyLut(const unsigned short lut[USHORT_RANGE],
unsigned short data[/*nData*/], int nData) {
for (int i = 0; i < nData; ++i) data[i] = lut[data[i]];
}
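//
// Worked example: if the samples contain only the values {0, 5, 9}, the
// bitmap has bits 5 and 9 set (zero is implicit), forwardLutFromBitmap()
// maps 0->0, 5->1, 9->2 (maxValue == 2), and reverseLutFromBitmap()
// restores 0->0, 1->5, 2->9 after decompression.
//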
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
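//
// Layout of a PIZ-compressed block written by CompressPiz() and read back
// by DecompressPiz():
//
//   2 bytes       minNonZero
//   2 bytes       maxNonZero
//   0..N bytes    bitmap[minNonZero..maxNonZero]
//                 (present only if minNonZero <= maxNonZero)
//   4 bytes       length of the Huffman-compressed data
//   length bytes  Huffman-compressed, wavelet-encoded, LUT-remapped samples
//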
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
const unsigned char *inPtr, size_t inSize,
const std::vector<ChannelInfo> &channelInfo,
int data_width, int num_lines) {
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
// Assume `inSize` is multiple of 2 or 4.
std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
std::vector<PIZChannelData> channelData(channelInfo.size());
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t c = 0; c < channelData.size(); c++) {
PIZChannelData &cd = channelData[c];
cd.start = tmpBufferEnd;
cd.end = cd.start;
cd.nx = data_width;
cd.ny = num_lines;
// cd.ys = c.channel().ySampling;
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
cd.size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += cd.nx * cd.ny * cd.size;
}
const unsigned char *ptr = inPtr;
for (int y = 0; y < num_lines; ++y) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(cd.end, ptr, n * sizeof(unsigned short));
ptr += n * sizeof(unsigned short);
cd.end += n;
}
}
bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
bitmap.data(), minNonZero, maxNonZero);
std::vector<unsigned short> lut(USHORT_RANGE);
unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
//
// Store range compression info in _outBuffer
//
char *buf = reinterpret_cast<char *>(outPtr);
memcpy(buf, &minNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
memcpy(buf, &maxNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
if (minNonZero <= maxNonZero) {
memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
maxNonZero - minNonZero + 1);
buf += maxNonZero - minNonZero + 1;
}
//
// Apply wavelet encoding
//
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Apply Huffman encoding; append the result to _outBuffer
//
// length header(4byte), then huff data. Initialize length header with zero,
// then later fill it by `length`.
char *lengthPtr = buf;
int zero = 0;
memcpy(buf, &zero, sizeof(int));
buf += sizeof(int);
int length =
hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
memcpy(lengthPtr, &length, sizeof(int));
(*outSize) = static_cast<unsigned int>(
(reinterpret_cast<unsigned char *>(buf) - outPtr) +
static_cast<unsigned int>(length));
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if ((*outSize) >= inSize) {
(*outSize) = static_cast<unsigned int>(inSize);
memcpy(outPtr, inPtr, inSize);
}
return true;
}
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
size_t tmpBufSize, size_t inLen, int num_channels,
const EXRChannelInfo *channels, int data_width,
int num_lines) {
if (inLen == tmpBufSize) {
    // Data is not compressed (Issue 40).
memcpy(outPtr, inPtr, inLen);
return true;
}
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
memset(bitmap.data(), 0, BITMAP_SIZE);
const unsigned char *ptr = inPtr;
// minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
// maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
ptr += 4;
if (maxNonZero >= BITMAP_SIZE) {
return false;
}
if (minNonZero <= maxNonZero) {
memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
maxNonZero - minNonZero + 1);
ptr += maxNonZero - minNonZero + 1;
}
std::vector<unsigned short> lut(USHORT_RANGE);
memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());
//
// Huffman decoding
//
int length;
// length = *(reinterpret_cast<const int *>(ptr));
tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
ptr += sizeof(int);
if (size_t((ptr - inPtr) + length) > inLen) {
return false;
}
std::vector<unsigned short> tmpBuffer(tmpBufSize);
  if (!hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer)) {
    return false;
  }
//
// Wavelet decoding
//
std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
const EXRChannelInfo &chan = channels[i];
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
channelData[i].start = tmpBufferEnd;
channelData[i].end = channelData[i].start;
channelData[i].nx = data_width;
channelData[i].ny = num_lines;
// channelData[i].ys = 1;
channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
}
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Expand the pixel data to their original range
//
applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize));
for (int y = 0; y < num_lines; y++) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
outPtr += n * sizeof(unsigned short);
cd.end += n;
}
}
return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
struct ZFPCompressionParam {
double rate;
int precision;
double tolerance;
int type; // TINYEXR_ZFP_COMPRESSIONTYPE_*
ZFPCompressionParam() {
type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
rate = 2.0;
precision = 0;
tolerance = 0.0f;
}
};
bool FindZFPCompressionParam(ZFPCompressionParam *param,
const EXRAttribute *attributes,
int num_attributes) {
bool foundType = false;
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) &&
(attributes[i].size == 1)) {
param->type = static_cast<int>(attributes[i].value[0]);
foundType = true;
}
}
if (!foundType) {
return false;
}
if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
(attributes[i].size == 8)) {
param->rate = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
(attributes[i].size == 4)) {
        param->precision = *(reinterpret_cast<int *>(attributes[i].value));
return true;
}
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
(attributes[i].size == 8)) {
param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
} else {
assert(0);
}
return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
int num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam ¶m) {
  size_t uncompressed_size =
      size_t(dst_width) * size_t(dst_num_lines) * size_t(num_channels);
  if (uncompressed_size == src_size) {
    // Data is not compressed (Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((dst_width & 3U) || (dst_num_lines & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, dst_width, dst_num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimention */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = dst_width * dst_num_lines;
for (int c = 0; c < num_channels; c++) {
// decompress 4x4 pixel block.
for (int y = 0; y < dst_num_lines; y += 4) {
for (int x = 0; x < dst_width; x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * dst_width + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
const float *inPtr, int width, int num_lines, int num_channels,
const ZFPCompressionParam ¶m) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((width % 4) == 0);
assert((num_lines % 4) == 0);
if ((width & 3U) || (num_lines & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, width, num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = width * num_lines;
for (int c = 0; c < num_channels; c++) {
// compress 4x4 pixel block.
for (int y = 0; y < num_lines; y += 4) {
for (int x = 0; x < width; x += 4) {
float fblock[16];
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * width + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
zfp_stream_flush(zfp);
(*outSize) = zfp_stream_compressed_size(zfp);
zfp_stream_close(zfp);
return true;
}
#endif
//
// -----------------------------------------------------------------
//
// TODO(syoyo): Refactor function arguments.
static bool DecodePixelData(/* out */ unsigned char **out_images,
const int *requested_pixel_types,
const unsigned char *data_ptr, size_t data_len,
int compression_type, int line_order, int width,
int height, int x_stride, int y, int line_no,
int num_lines, size_t pixel_data_size,
size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ
#if TINYEXR_USE_PIZ
if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
// Invalid input #90
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(
static_cast<size_t>(width * num_lines) * pixel_data_size));
size_t tmpBufLen = outBuf.size();
bool ret = tinyexr::DecompressPiz(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
data_len, static_cast<int>(num_channels), channels, width, num_lines);
if (!ret) {
return false;
}
// For PIZ_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
FP16 hf;
// hf.u = line_ptr[u];
// use `cpy` to avoid unaligned memory access when compiler's
// optimization is on.
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          // Index outBuf with `width` (not x_stride): DecompressPiz wrote
          // `width` samples per channel per scanline.
          const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
              v * pixel_data_size * static_cast<size_t>(width) +
              channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
}
}
#else
assert(0 && "PIZ is enabled in this build");
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
if (dstLen == 0) {
return false;
}
if (!tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)),
dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For RLE_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
if (!FindZFPCompressionParam(&zfp_compression_param, attributes,
num_attributes)) {
assert(0);
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = outBuf.size();
assert(dstLen > 0);
tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
num_lines, num_channels, data_ptr,
static_cast<unsigned long>(data_len),
zfp_compression_param);
// For ZFP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
#else
(void)attributes;
(void)num_attributes;
(void)num_channels;
assert(0);
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
for (size_t c = 0; c < num_channels; c++) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
const unsigned short *line_ptr =
reinterpret_cast<const unsigned short *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *outLine =
reinterpret_cast<unsigned short *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
outLine[u] = hf.u;
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
              // address may not be aligned. use byte-wise copy for safety (#76)
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
tinyexr::FP32 f32 = half_to_float(hf);
outLine[u] = f32.f;
}
} else {
assert(0);
return false;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
const float *line_ptr = reinterpret_cast<const float *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
unsigned int *outLine =
reinterpret_cast<unsigned int *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
(data_ptr + data_len)) {
              // Corrupted data?
return false;
}
unsigned int val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
}
}
}
}
return true;
}
static void DecodeTiledPixelData(
unsigned char **out_images, int *width, int *height,
const int *requested_pixel_types, const unsigned char *data_ptr,
size_t data_len, int compression_type, int line_order, int data_width,
int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
int tile_size_y, size_t pixel_data_size, size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
assert(tile_offset_x * tile_size_x < data_width);
assert(tile_offset_y * tile_size_y < data_height);
// Compute actual image size in a tile.
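  // e.g. with data_width == 100 and tile_size_x == 32, tiles 0..2 are 32
  // pixels wide and the last tile (tile_offset_x == 3) is 100 - 96 = 4
  // pixels wide; the same rule applies vertically.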
if ((tile_offset_x + 1) * tile_size_x >= data_width) {
(*width) = data_width - (tile_offset_x * tile_size_x);
} else {
(*width) = tile_size_x;
}
if ((tile_offset_y + 1) * tile_size_y >= data_height) {
(*height) = data_height - (tile_offset_y * tile_size_y);
} else {
(*height) = tile_size_y;
}
// Image size = tile size.
DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
compression_type, line_order, (*width), tile_size_y,
/* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
(*height), pixel_data_size, num_attributes, attributes,
num_channels, channels, channel_offset_list);
}
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
int *pixel_data_size, size_t *channel_offset,
int num_channels,
const EXRChannelInfo *channels) {
channel_offset_list->resize(static_cast<size_t>(num_channels));
(*pixel_data_size) = 0;
(*channel_offset) = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
(*channel_offset_list)[c] = (*channel_offset);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
(*pixel_data_size) += sizeof(unsigned short);
(*channel_offset) += sizeof(unsigned short);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
(*pixel_data_size) += sizeof(float);
(*channel_offset) += sizeof(float);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
(*pixel_data_size) += sizeof(unsigned int);
(*channel_offset) += sizeof(unsigned int);
} else {
// ???
return false;
}
}
return true;
}
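//
// Worked example: for channels {R: HALF, G: FLOAT, B: UINT},
// ComputeChannelLayout() yields channel_offset_list = {0, 2, 6} and
// pixel_data_size = 10 bytes per pixel; within a decompressed scanline the
// offsets are scaled by `width`, so all R samples come first, then all G,
// then all B (see the "pixel sample data for channel ..." notes above).
//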
static unsigned char **AllocateImage(int num_channels,
const EXRChannelInfo *channels,
const int *requested_pixel_types,
int data_width, int data_height) {
unsigned char **images =
reinterpret_cast<unsigned char **>(static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
size_t data_len =
static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
// pixel_data_size += sizeof(unsigned short);
// channel_offset += sizeof(unsigned short);
// Alloc internal image for half type.
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
images[c] =
reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
malloc(sizeof(unsigned short) * data_len)));
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
// pixel_data_size += sizeof(float);
// channel_offset += sizeof(float);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
// pixel_data_size += sizeof(unsigned int);
// channel_offset += sizeof(unsigned int);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
} else {
assert(0);
}
}
return images;
}
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
const EXRVersion *version, std::string *err,
const unsigned char *buf, size_t size) {
const char *marker = reinterpret_cast<const char *>(&buf[0]);
if (empty_header) {
(*empty_header) = false;
}
if (version->multipart) {
if (size > 0 && marker[0] == '\0') {
// End of header list.
if (empty_header) {
(*empty_header) = true;
}
return TINYEXR_SUCCESS;
}
}
// According to the spec, the header of every OpenEXR file must contain at
// least the following attributes:
//
// channels chlist
// compression compression
// dataWindow box2i
// displayWindow box2i
// lineOrder lineOrder
// pixelAspectRatio float
// screenWindowCenter v2f
// screenWindowWidth float
bool has_channels = false;
bool has_compression = false;
bool has_data_window = false;
bool has_display_window = false;
bool has_line_order = false;
bool has_pixel_aspect_ratio = false;
bool has_screen_window_center = false;
bool has_screen_window_width = false;
info->data_window[0] = 0;
info->data_window[1] = 0;
info->data_window[2] = 0;
info->data_window[3] = 0;
info->line_order = 0; // @fixme
info->display_window[0] = 0;
info->display_window[1] = 0;
info->display_window[2] = 0;
info->display_window[3] = 0;
info->screen_window_center[0] = 0.0f;
info->screen_window_center[1] = 0.0f;
info->screen_window_width = -1.0f;
info->pixel_aspect_ratio = -1.0f;
info->tile_size_x = -1;
info->tile_size_y = -1;
info->tile_level_mode = -1;
info->tile_rounding_mode = -1;
info->attributes.clear();
// Read attributes
size_t orig_size = size;
for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
if (0 == size) {
if (err) {
(*err) += "Insufficient data size for attributes.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
if (err) {
(*err) += "Failed to read attribute.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (version->tiled && attr_name.compare("tiles") == 0) {
unsigned int x_size, y_size;
unsigned char tile_mode;
assert(data.size() == 9);
memcpy(&x_size, &data.at(0), sizeof(int));
memcpy(&y_size, &data.at(4), sizeof(int));
tile_mode = data[8];
tinyexr::swap4(&x_size);
tinyexr::swap4(&y_size);
info->tile_size_x = static_cast<int>(x_size);
info->tile_size_y = static_cast<int>(y_size);
// mode = levelMode + roundingMode * 16
info->tile_level_mode = tile_mode & 0x3;
info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
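      // e.g. tile_mode == 0x12 decodes to level mode 2 (ripmap) and
      // rounding mode 1 (round up), per the OpenEXR tiled-image spec.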
} else if (attr_name.compare("compression") == 0) {
bool ok = false;
if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
ok = true;
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
ok = true;
#else
if (err) {
(*err) = "PIZ compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
ok = true;
#else
if (err) {
(*err) = "ZFP compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (!ok) {
if (err) {
(*err) = "Unknown compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->compression_type = static_cast<int>(data[0]);
has_compression = true;
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!ReadChannelInfo(info->channels, data)) {
if (err) {
(*err) += "Failed to parse channel info.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (info->channels.size() < 1) {
if (err) {
(*err) += "# of channels is zero.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
has_channels = true;
} else if (attr_name.compare("dataWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->data_window[0], &data.at(0), sizeof(int));
memcpy(&info->data_window[1], &data.at(4), sizeof(int));
memcpy(&info->data_window[2], &data.at(8), sizeof(int));
memcpy(&info->data_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3]));
has_data_window = true;
}
} else if (attr_name.compare("displayWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->display_window[0], &data.at(0), sizeof(int));
memcpy(&info->display_window[1], &data.at(4), sizeof(int));
memcpy(&info->display_window[2], &data.at(8), sizeof(int));
memcpy(&info->display_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[1]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[2]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[3]));
has_display_window = true;
}
} else if (attr_name.compare("lineOrder") == 0) {
if (data.size() >= 1) {
info->line_order = static_cast<int>(data[0]);
has_line_order = true;
}
} else if (attr_name.compare("pixelAspectRatio") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio));
has_pixel_aspect_ratio = true;
}
} else if (attr_name.compare("screenWindowCenter") == 0) {
if (data.size() >= 8) {
memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[1]));
has_screen_window_center = true;
}
} else if (attr_name.compare("screenWindowWidth") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_width));
has_screen_window_width = true;
}
} else if (attr_name.compare("chunkCount") == 0) {
if (data.size() >= sizeof(int)) {
memcpy(&info->chunk_count, &data.at(0), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count));
}
} else {
      // Custom attribute (up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
EXRAttribute attrib;
#ifdef _MSC_VER
strncpy_s(attrib.name, attr_name.c_str(), 255);
strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
strncpy(attrib.name, attr_name.c_str(), 255);
strncpy(attrib.type, attr_type.c_str(), 255);
#endif
attrib.name[255] = '\0';
attrib.type[255] = '\0';
attrib.size = static_cast<int>(data.size());
attrib.value = static_cast<unsigned char *>(malloc(data.size()));
memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
data.size());
info->attributes.push_back(attrib);
}
}
}
// Check if required attributes exist
{
std::stringstream ss_err;
if (!has_compression) {
ss_err << "\"compression\" attribute not found in the header."
<< std::endl;
}
if (!has_channels) {
ss_err << "\"channels\" attribute not found in the header." << std::endl;
}
if (!has_line_order) {
ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
}
if (!has_display_window) {
ss_err << "\"displayWindow\" attribute not found in the header."
<< std::endl;
}
if (!has_data_window) {
ss_err << "\"dataWindow\" attribute not found in the header or invalid."
<< std::endl;
}
if (!has_pixel_aspect_ratio) {
ss_err << "\"pixelAspectRatio\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_width) {
ss_err << "\"screenWindowWidth\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_center) {
ss_err << "\"screenWindowCenter\" attribute not found in the header."
<< std::endl;
}
if (!(ss_err.str().empty())) {
if (err) {
(*err) += ss_err.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
}
info->header_len = static_cast<unsigned int>(orig_size - size);
return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
exr_header->screen_window_center[0] = info.screen_window_center[0];
exr_header->screen_window_center[1] = info.screen_window_center[1];
exr_header->screen_window_width = info.screen_window_width;
exr_header->chunk_count = info.chunk_count;
exr_header->display_window[0] = info.display_window[0];
exr_header->display_window[1] = info.display_window[1];
exr_header->display_window[2] = info.display_window[2];
exr_header->display_window[3] = info.display_window[3];
exr_header->data_window[0] = info.data_window[0];
exr_header->data_window[1] = info.data_window[1];
exr_header->data_window[2] = info.data_window[2];
exr_header->data_window[3] = info.data_window[3];
exr_header->line_order = info.line_order;
exr_header->compression_type = info.compression_type;
exr_header->tile_size_x = info.tile_size_x;
exr_header->tile_size_y = info.tile_size_y;
exr_header->tile_level_mode = info.tile_level_mode;
exr_header->tile_rounding_mode = info.tile_rounding_mode;
exr_header->num_channels = static_cast<int>(info.channels.size());
exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
// manually add '\0' for safety.
exr_header->channels[c].name[255] = '\0';
exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
exr_header->channels[c].p_linear = info.channels[c].p_linear;
exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
}
exr_header->pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->pixel_types[c] = info.channels[c].pixel_type;
}
// Initially fill with values of `pixel_types`
exr_header->requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
}
exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
if (exr_header->num_custom_attributes > 0) {
// TODO(syoyo): Report warning when # of attributes exceeds
// `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
}
exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));
for (size_t i = 0; i < info.attributes.size(); i++) {
memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
256);
memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
256);
exr_header->custom_attributes[i].size = info.attributes[i].size;
      // Just copy the pointer.
exr_header->custom_attributes[i].value = info.attributes[i].value;
}
} else {
exr_header->custom_attributes = NULL;
}
exr_header->header_len = info.header_len;
}
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
const std::vector<tinyexr::tinyexr_uint64> &offsets,
const unsigned char *head, const size_t size,
std::string *err) {
int num_channels = exr_header->num_channels;
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;
if ((data_width < 0) || (data_height < 0)) {
if (err) {
std::stringstream ss;
ss << "Invalid data width or data height: " << data_width << ", "
<< data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
  // Reject unreasonably large data_width / data_height (likely an invalid
  // header).
{
const int threshold = 1024 * 8192; // heuristics
if ((data_width > threshold) || (data_height > threshold)) {
if (err) {
std::stringstream ss;
ss << "data_with or data_height too large. data_width: " << data_width
<< ", "
<< "data_height = " << data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
}
size_t num_blocks = offsets.size();
std::vector<size_t> channel_offset_list;
int pixel_data_size = 0;
size_t channel_offset = 0;
if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
&channel_offset, num_channels,
exr_header->channels)) {
if (err) {
(*err) += "Failed to compute channel layout.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety.
if (exr_header->tiled) {
// value check
if (exr_header->tile_size_x <= 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_size_y <= 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
size_t num_tiles = offsets.size(); // = # of blocks
exr_image->tiles = static_cast<EXRTile *>(
calloc(num_tiles, sizeof(EXRTile))); // calloc(count, element size)
for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
// Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
exr_header->tile_size_x, exr_header->tile_size_y);
// 16 byte: tile coordinates
// 4 byte : data size
// ~ : data(uncompressed or compressed)
if (offsets[tile_idx] + sizeof(int) * 5 > size) {
if (err) {
(*err) += "Insufficient data size.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);
int tile_coordinates[4];
memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3]));
// @todo{ LoD }
if (tile_coordinates[2] != 0) {
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
if (tile_coordinates[3] != 0) {
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
int data_len;
memcpy(&data_len, data_ptr + 16,
sizeof(int)); // 16 = sizeof(tile_coordinates)
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len < 4 || size_t(data_len) > data_size) {
if (err) {
(*err) += "Insufficient data length.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Move to data addr: 20 = 16 + 4;
data_ptr += 20;
tinyexr::DecodeTiledPixelData(
exr_image->tiles[tile_idx].images,
&(exr_image->tiles[tile_idx].width),
&(exr_image->tiles[tile_idx].height),
exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order, data_width, data_height, tile_coordinates[0],
tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y,
static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels), exr_header->channels,
channel_offset_list);
exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
}
exr_image->num_tiles = static_cast<int>(num_tiles);
} else { // scanline format
// Don't allow too large an image (256G pixel samples or more, i.e.
// 256GB * pixel_data_size bytes). Workaround for issue #104.
size_t total_data_len =
size_t(data_width) * size_t(data_height) * size_t(num_channels);
const bool total_data_len_overflown =
sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
if ((total_data_len == 0) || total_data_len_overflown) {
if (err) {
std::stringstream ss;
ss << "Image data size is zero or too large: width = " << data_width
<< ", height = " << data_height << ", channels = " << num_channels
<< std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
exr_image->images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < static_cast<int>(num_blocks); y++) {
size_t y_idx = static_cast<size_t>(y);
if (offsets[y_idx] + sizeof(int) * 2 > size) {
invalid_data = true;
} else {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
int line_no;
memcpy(&line_no, data_ptr, sizeof(int));
int data_len;
memcpy(&data_len, data_ptr + 4, sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (size_t(data_len) > data_size) {
invalid_data = true;
} else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
// Too large a value. Assume the data is invalid.
// 2 << 20 = 2097152 (2^21): heuristic bound.
invalid_data = true;
} else if (data_len == 0) {
// TODO(syoyo): It may be OK to raise this threshold, e.g. to
// `data_len < 4`.
invalid_data = true;
} else {
// line_no may be negative.
int end_line_no = (std::min)(line_no + num_scanline_blocks,
(exr_header->data_window[3] + 1));
int num_lines = end_line_no - line_no;
if (num_lines <= 0) {
invalid_data = true;
} else {
// Move to data addr: 8 = 4 + 4;
data_ptr += 8;
// Adjust line_no with data_window.bmin.y
// overflow check
tinyexr_int64 lno = static_cast<tinyexr_int64>(line_no) - static_cast<tinyexr_int64>(exr_header->data_window[1]);
if (lno > std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else if (lno < -std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else {
line_no -= exr_header->data_window[1];
}
if (line_no < 0) {
invalid_data = true;
} else {
if (!tinyexr::DecodePixelData(
exr_image->images, exr_header->requested_pixel_types,
data_ptr, static_cast<size_t>(data_len),
exr_header->compression_type, exr_header->line_order,
data_width, data_height, data_width, y, line_no,
num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list)) {
invalid_data = true;
}
}
}
}
}
} // omp parallel
}
if (invalid_data) {
if (err) {
(*err) += "Invalid data found when decoding pixels.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Overwrite `pixel_type` with `requested_pixel_type`.
{
for (int c = 0; c < exr_header->num_channels; c++) {
exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
}
}
{
exr_image->num_channels = num_channels;
exr_image->width = data_width;
exr_image->height = data_height;
}
return TINYEXR_SUCCESS;
}
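// ReconstructLineOffsets: rebuilds a broken line-offset table by walking the
// chunks sequentially from `marker`, reading each chunk's data size and
// skipping ahead by it, mirroring OpenEXR's recovery behavior.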
static bool ReconstructLineOffsets(
std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
const unsigned char *head, const unsigned char *marker, const size_t size) {
assert(head < marker);
assert(offsets->size() == n);
for (size_t i = 0; i < n; i++) {
size_t offset = static_cast<size_t>(marker - head);
// Offset should not exceed whole EXR file/data size.
if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
return false;
}
int y;
unsigned int data_len;
memcpy(&y, marker, sizeof(int));
memcpy(&data_len, marker + 4, sizeof(unsigned int));
// Byte-swap before validating, so the check also works on big-endian hosts.
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len >= size) {
return false;
}
(*offsets)[i] = offset;
marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len)
}
return true;
}
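// DecodeEXRImage: reads the chunk offset table that follows the header,
// repairs it with ReconstructLineOffsets() if it contains zero entries, then
// decodes all chunks via DecodeChunk().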
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *head,
const unsigned char *marker, const size_t size,
const char **err) {
if (exr_image == NULL || exr_header == NULL || head == NULL ||
marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
int data_width = exr_header->data_window[2] - exr_header->data_window[0];
if (data_width >= std::numeric_limits<int>::max()) {
// Issue 63
tinyexr::SetErrorMessage("Invalid data width value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
data_width++;
int data_height = exr_header->data_window[3] - exr_header->data_window[1];
if (data_height >= std::numeric_limits<int>::max()) {
tinyexr::SetErrorMessage("Invalid data height value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
data_height++;
if ((data_width < 0) || (data_height < 0)) {
tinyexr::SetErrorMessage("data width or data height is negative.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
// Do not allow too-large data_width and data_height (probably an invalid
// header).
{
const int threshold = 1024 * 8192; // heuristics
if (data_width > threshold) {
tinyexr::SetErrorMessage("data width too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
if (data_height > threshold) {
tinyexr::SetErrorMessage("data height too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
// Read offset tables.
size_t num_blocks = 0;
if (exr_header->chunk_count > 0) {
// Use `chunkCount` attribute.
num_blocks = static_cast<size_t>(exr_header->chunk_count);
} else if (exr_header->tiled) {
// @todo { LoD }
// Guard against division by zero from a corrupt header.
if ((exr_header->tile_size_x <= 0) || (exr_header->tile_size_y <= 0)) {
tinyexr::SetErrorMessage("Invalid tile size.", err);
return TINYEXR_ERROR_INVALID_HEADER;
}
size_t num_x_tiles = static_cast<size_t>(data_width) /
static_cast<size_t>(exr_header->tile_size_x);
if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) <
static_cast<size_t>(data_width)) {
num_x_tiles++;
}
size_t num_y_tiles = static_cast<size_t>(data_height) /
static_cast<size_t>(exr_header->tile_size_y);
if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) <
static_cast<size_t>(data_height)) {
num_y_tiles++;
}
num_blocks = num_x_tiles * num_y_tiles;
} else {
num_blocks = static_cast<size_t>(data_height) /
static_cast<size_t>(num_scanline_blocks);
if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
static_cast<size_t>(data_height)) {
num_blocks++;
}
}
std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks);
for (size_t y = 0; y < num_blocks; y++) {
tinyexr::tinyexr_uint64 offset;
// Issue #81
if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offsets[y] = offset;
}
// If line offsets are invalid, we try to reconstruct it.
// See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
for (size_t y = 0; y < num_blocks; y++) {
if (offsets[y] == 0) { // offsets are unsigned; 0 marks a missing entry.
// TODO(syoyo) Report as warning?
// if (err) {
// stringstream ss;
// ss << "Incomplete lineOffsets." << std::endl;
// (*err) += ss.str();
//}
bool ret =
ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
if (ret) {
// OK
break;
} else {
tinyexr::SetErrorMessage(
"Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
{
std::string e;
int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
// Release image memory (if allocated).
if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
if (exr_image->images[c]) {
free(exr_image->images[c]);
exr_image->images[c] = NULL;
}
}
free(exr_image->images);
exr_image->images = NULL;
}
}
return ret;
}
}
} // namespace tinyexr
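// Example usage of LoadEXR() (illustrative sketch; "input.exr" is a
// placeholder filename; error text must be released with
// FreeEXRErrorMessage()):
//
//   float *rgba = NULL;
//   int w, h;
//   const char *err = NULL;
//   int ret = LoadEXR(&rgba, &w, &h, "input.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     if (err) {
//       fprintf(stderr, "EXR load error: %s\n", err);
//       FreeEXRErrorMessage(err);
//     }
//   } else {
//     // rgba holds width * height RGBA pixels as floats.
//     free(rgba);
//   }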
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
const char **err) {
if (out_rgba == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
InitEXRImage(&exr_image);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage("Invalid EXR header.", err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
{
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
{
int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
if (exr_header.num_channels == 1) {
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[0][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// Assume RGB(A)
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
// @todo { free exr_image }
FreeEXRHeader(&exr_header);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
// @todo { free exr_image }
FreeEXRHeader(&exr_header);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
// @todo { free exr_image }
FreeEXRHeader(&exr_header);
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0f;
}
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0f;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int IsEXR(const char *filename) {
EXRVersion exr_version;
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return TINYEXR_ERROR_INVALID_HEADER;
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_header == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument. `memory` or `exr_header` argument is null in "
"ParseEXRHeaderFromMemory()",
err);
// Invalid argument
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
if (err && !err_str.empty()) {
tinyexr::SetErrorMessage(err_str, err);
}
}
ConvertHeader(exr_header, info);
// Transfer `tiled` from version.
exr_header->tiled = version->tiled;
return ret;
}
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err) {
if (out_rgba == NULL || memory == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage("Failed to parse EXR version", err);
return ret;
}
ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
InitEXRImage(&exr_image);
ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
// TODO(syoyo): Refactor removing same code as used in LoadEXR().
if (exr_header.num_channels == 1) {
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[0][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// TODO(syoyo): Support non RGBA image.
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0f;
}
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0f;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize < 16) {
fclose(fp);
tinyexr::SetErrorMessage("File size too short " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
err);
}
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *memory, const size_t size,
const char **err) {
if (exr_image == NULL || exr_header == NULL || memory == NULL ||
(size < tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
const unsigned char *head = memory;
const unsigned char *marker = reinterpret_cast<const unsigned char *>(
memory + exr_header->header_len +
8); // +8 for magic number + version header.
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
err);
}
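// SaveEXRImageToMemory: serializes a scanline EXR into a newly malloc()ed
// buffer (written to *memory_out; the caller is responsible for free()ing
// it). Layout: magic number, version, attributes, a '\0' header terminator,
// the chunk offset table, then the (possibly compressed) scanline chunks.
// Returns the total byte size, or 0 on error.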
size_t SaveEXRImageToMemory(const EXRImage *exr_image,
const EXRHeader *exr_header,
unsigned char **memory_out, const char **err) {
if (exr_image == NULL || memory_out == NULL || exr_header == NULL ||
exr_header->compression_type < 0) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err);
return 0;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
err);
return 0;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
err);
return 0;
}
#endif
#if TINYEXR_USE_ZFP
for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) {
if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) {
tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
err);
return 0;
}
}
#endif
std::vector<unsigned char> memory;
// Header
{
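// 0x76 0x2f 0x31 0x01 = "v/1" followed by 0x01: the OpenEXR magic number.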
const char header[] = {0x76, 0x2f, 0x31, 0x01};
memory.insert(memory.end(), header, header + 4);
}
// Version, scanline.
{
char marker[] = {2, 0, 0, 0};
/* @todo
if (exr_header->tiled) {
marker[1] |= 0x2;
}
if (exr_header->long_name) {
marker[1] |= 0x4;
}
if (exr_header->non_image) {
marker[1] |= 0x8;
}
if (exr_header->multipart) {
marker[1] |= 0x10;
}
*/
memory.insert(memory.end(), marker, marker + 4);
}
int num_scanlines = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanlines = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanlines = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanlines = 16;
}
// Write attributes.
std::vector<tinyexr::ChannelInfo> channels;
{
std::vector<unsigned char> data;
for (int c = 0; c < exr_header->num_channels; c++) {
tinyexr::ChannelInfo info;
info.p_linear = 0;
info.pixel_type = exr_header->requested_pixel_types[c];
info.x_sampling = 1;
info.y_sampling = 1;
info.name = std::string(exr_header->channels[c].name);
channels.push_back(info);
}
tinyexr::WriteChannelInfo(data, channels);
tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
static_cast<int>(data.size()));
}
{
int comp = exr_header->compression_type;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp));
tinyexr::WriteAttributeToMemory(
&memory, "compression", "compression",
reinterpret_cast<const unsigned char *>(&comp), 1);
}
{
int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1};
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3]));
tinyexr::WriteAttributeToMemory(
&memory, "dataWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
tinyexr::WriteAttributeToMemory(
&memory, "displayWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
}
{
unsigned char line_order = 0; // @fixme { read line_order from EXRHeader }
tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
&line_order, 1);
}
{
float aspectRatio = 1.0f;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio));
tinyexr::WriteAttributeToMemory(
&memory, "pixelAspectRatio", "float",
reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float));
}
{
float center[2] = {0.0f, 0.0f};
tinyexr::swap4(reinterpret_cast<unsigned int *>(¢er[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(¢er[1]));
tinyexr::WriteAttributeToMemory(
&memory, "screenWindowCenter", "v2f",
reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float));
}
{
float w = static_cast<float>(exr_image->width);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
reinterpret_cast<const unsigned char *>(&w),
sizeof(float));
}
// Custom attributes
if (exr_header->num_custom_attributes > 0) {
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
tinyexr::WriteAttributeToMemory(
&memory, exr_header->custom_attributes[i].name,
exr_header->custom_attributes[i].type,
reinterpret_cast<const unsigned char *>(
exr_header->custom_attributes[i].value),
exr_header->custom_attributes[i].size);
}
}
{ // end of header
unsigned char e = 0;
memory.push_back(e);
}
int num_blocks = exr_image->height / num_scanlines;
if (num_blocks * num_scanlines < exr_image->height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks));
size_t headerSize = memory.size();
tinyexr::tinyexr_uint64 offset =
headerSize +
static_cast<size_t>(num_blocks) *
sizeof(
tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable)
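// Each table entry is the absolute byte position (from the start of the
// file) of one chunk, stored as a little-endian uint64 right after the
// header.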
std::vector<std::vector<unsigned char> > data_list(
static_cast<size_t>(num_blocks));
std::vector<size_t> channel_offset_list(
static_cast<size_t>(exr_header->num_channels));
int pixel_data_size = 0;
size_t channel_offset = 0;
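// Within a chunk, pixel data is stored scanline by scanline, and each
// scanline holds its channels one after another (channel-planar), so each
// channel needs a byte offset into the scanline.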
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
channel_offset_list[c] = channel_offset;
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
pixel_data_size += sizeof(unsigned short);
channel_offset += sizeof(unsigned short);
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
pixel_data_size += sizeof(float);
channel_offset += sizeof(float);
} else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
pixel_data_size += sizeof(unsigned int);
channel_offset += sizeof(unsigned int);
} else {
assert(0);
}
}
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
// Use the ZFP compression parameter from custom attributes (if such a
// parameter exists).
{
bool ret = tinyexr::FindZFPCompressionParam(
&zfp_compression_param, exr_header->custom_attributes,
exr_header->num_custom_attributes);
if (!ret) {
// Use predefined compression parameter.
zfp_compression_param.type = 0;
zfp_compression_param.rate = 2;
}
}
#endif
// Use signed int since some OpenMP compilers don't allow unsigned types for
// `parallel for`.
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_blocks; i++) {
size_t ii = static_cast<size_t>(i);
int start_y = num_scanlines * i;
int endY = (std::min)(num_scanlines * (i + 1), exr_image->height);
int h = endY - start_y;
std::vector<unsigned char> buf(
static_cast<size_t>(exr_image->width * h * pixel_data_size));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP16 h16;
h16.u = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP32 f32 = half_to_float(h16);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f));
// line_ptr[x] = f32.f;
tinyexr::cpy4(line_ptr + x, &(f32.f));
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
unsigned short val = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap2(&val);
// line_ptr[x] = val;
tinyexr::cpy2(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP32 f32;
f32.f = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP16 h16;
h16 = float_to_half_full(f32);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
// line_ptr[x] = h16.u;
tinyexr::cpy2(line_ptr + x, &(h16.u));
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
float val = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] * static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
unsigned int val = reinterpret_cast<unsigned int **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(&val);
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
}
}
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(buf.size());
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), buf.begin(),
buf.begin() + data_len);
} else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
static_cast<unsigned long>(buf.size())));
#else
std::vector<unsigned char> block(
compressBound(static_cast<uLong>(buf.size())));
#endif
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressZip(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// (buf.size() * 3) / 2 would be enough.
std::vector<unsigned char> block((buf.size() * 3) / 2);
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressRle(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
unsigned int bufLen =
8192 + static_cast<unsigned int>(
2 * static_cast<unsigned int>(
buf.size())); // @fixme { compute good bound. }
std::vector<unsigned char> block(bufLen);
unsigned int outSize = static_cast<unsigned int>(block.size());
CompressPiz(&block.at(0), &outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
buf.size(), channels, exr_image->width, h);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
std::vector<unsigned char> block;
unsigned int outSize;
tinyexr::CompressZfp(
&block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
exr_image->width, h, exr_header->num_channels, zfp_compression_param);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else {
assert(0);
}
} // omp parallel
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
offsets[i] = offset;
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
offset += data_list[i].size();
}
size_t totalSize = static_cast<size_t>(offset);
{
memory.insert(
memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)),
reinterpret_cast<unsigned char *>(&offsets.at(0)) +
sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks));
}
if (memory.size() == 0) {
tinyexr::SetErrorMessage("Output memory size is zero", err);
return 0;
}
(*memory_out) = static_cast<unsigned char *>(malloc(totalSize));
memcpy((*memory_out), &memory.at(0), memory.size());
unsigned char *memory_ptr = *memory_out + memory.size();
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
memcpy(memory_ptr, &data_list[i].at(0), data_list[i].size());
memory_ptr += data_list[i].size();
}
return totalSize; // OK
}
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL || filename == NULL || exr_header == NULL ||
exr_header->compression_type < 0) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "wb");
#else
FILE *fp = fopen(filename, "wb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot write a file", err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
unsigned char *mem = NULL;
size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
if (mem_size == 0) {
fclose(fp);
return TINYEXR_ERROR_SERIALZATION_FAILED;
}
size_t written_size = 0;
if ((mem_size > 0) && mem) {
written_size = fwrite(mem, 1, mem_size, fp);
}
free(mem);
fclose(fp);
if (written_size != mem_size) {
tinyexr::SetErrorMessage("Cannot write a file", err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
return TINYEXR_SUCCESS;
}
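// LoadDeepEXR: reads a deep scanline EXR (version byte 2 with the deep-data
// flag 0x800 set). Each chunk carries a zip-compressed per-pixel sample
// offset table followed by zip-compressed sample data; samples are expanded
// to float and stored as image[channel][scanline][sample].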
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
if (deep_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _MSC_VER
FILE *fp = NULL;
errno_t errcode = fopen_s(&fp, filename, "rb");
if ((0 != errcode) || (!fp)) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
FILE *fp = fopen(filename, "rb");
if (!fp) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#endif
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize == 0) {
fclose(fp);
tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
(void)ret;
}
fclose(fp);
const char *head = &buf[0];
const char *marker = &buf[0];
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
tinyexr::SetErrorMessage("Invalid magic number", err);
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
// Version, scanline.
{
// ver 2.0, scanline, deep bit on (0x800)
// must be [2, 8, 0, 0]
if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
tinyexr::SetErrorMessage("Unsupported version or scanline", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
marker += 4;
}
int dx = -1;
int dy = -1;
int dw = -1;
int dh = -1;
int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1;
int num_channels = -1;
std::vector<tinyexr::ChannelInfo> channels;
// Read attributes
size_t size = filesize - tinyexr::kEXRVersionSize;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
marker++;
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
std::stringstream ss;
ss << "Failed to parse attribute\n";
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (attr_name.compare("compression") == 0) {
compression_type = data[0];
if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
std::stringstream ss;
ss << "Unsupported compression type : " << compression_type;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
}
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!tinyexr::ReadChannelInfo(channels, data)) {
tinyexr::SetErrorMessage("Failed to parse channel info", err);
return TINYEXR_ERROR_INVALID_DATA;
}
num_channels = static_cast<int>(channels.size());
if (num_channels < 1) {
tinyexr::SetErrorMessage("Invalid channels format", err);
return TINYEXR_ERROR_INVALID_DATA;
}
} else if (attr_name.compare("dataWindow") == 0) {
memcpy(&dx, &data.at(0), sizeof(int));
memcpy(&dy, &data.at(4), sizeof(int));
memcpy(&dw, &data.at(8), sizeof(int));
memcpy(&dh, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh));
} else if (attr_name.compare("displayWindow") == 0) {
int x;
int y;
int w;
int h;
memcpy(&x, &data.at(0), sizeof(int));
memcpy(&y, &data.at(4), sizeof(int));
memcpy(&w, &data.at(8), sizeof(int));
memcpy(&h, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&h));
}
}
assert(dx >= 0);
assert(dy >= 0);
assert(dw >= 0);
assert(dh >= 0);
assert(num_channels >= 1);
// asserts compile out in release builds, so validate explicitly too.
if ((dx < 0) || (dy < 0) || (dw < 0) || (dh < 0) || (num_channels < 1)) {
tinyexr::SetErrorMessage("Invalid attributes in deep EXR header", err);
return TINYEXR_ERROR_INVALID_DATA;
}
int data_width = dw - dx + 1;
int data_height = dh - dy + 1;
std::vector<float> image(
static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA
// Read offset tables.
int num_blocks = data_height / num_scanline_blocks;
if (num_blocks * num_scanline_blocks < data_height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
tinyexr::tinyexr_int64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
marker += sizeof(tinyexr::tinyexr_int64); // = 8
offsets[y] = offset;
}
#if TINYEXR_USE_PIZ
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
// OK
} else {
tinyexr::SetErrorMessage("Unsupported compression format", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
deep_image->image = static_cast<float ***>(
malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
for (int c = 0; c < num_channels; c++) {
deep_image->image[c] = static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(data_height)));
// Per-scanline sample arrays (image[c][y]) are allocated later, when each
// block is decoded.
}
deep_image->offset_table = static_cast<int **>(
malloc(sizeof(int *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
deep_image->offset_table[y] = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(data_width)));
}
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// int: y coordinate
// int64: packed size of pixel offset table
// int64: packed size of sample data
// int64: unpacked size of sample data
// compressed pixel offset table
// compressed sample data
int line_no;
tinyexr::tinyexr_int64 packedOffsetTableSize;
tinyexr::tinyexr_int64 packedSampleDataSize;
tinyexr::tinyexr_int64 unpackedSampleDataSize;
memcpy(&line_no, data_ptr, sizeof(int));
memcpy(&packedOffsetTableSize, data_ptr + 4,
sizeof(tinyexr::tinyexr_int64));
memcpy(&packedSampleDataSize, data_ptr + 12,
sizeof(tinyexr::tinyexr_int64));
memcpy(&unpackedSampleDataSize, data_ptr + 20,
sizeof(tinyexr::tinyexr_int64));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
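// The pixel offset table holds, per pixel, the cumulative sample count up to
// and including that pixel; the last entry times the per-sample byte size
// therefore equals the size of the unpacked sample data.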
std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
// decode pixel offset table.
{
unsigned long dstLen =
static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
&dstLen, data_ptr + 28,
static_cast<unsigned long>(packedOffsetTableSize))) {
return TINYEXR_ERROR_INVALID_DATA;
}
assert(dstLen == pixelOffsetTable.size() * sizeof(int));
for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
deep_image->offset_table[y][i] = pixelOffsetTable[i];
}
}
std::vector<unsigned char> sample_data(
static_cast<size_t>(unpackedSampleDataSize));
// decode sample data.
{
unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
if (dstLen) {
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
data_ptr + 28 + packedOffsetTableSize,
static_cast<unsigned long>(packedSampleDataSize))) {
return TINYEXR_ERROR_INVALID_DATA;
}
assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
}
}
// decode sample
int sampleSize = -1;
std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
{
int channel_offset = 0;
for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
channel_offset_list[i] = channel_offset;
if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
channel_offset += 4;
} else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
channel_offset += 2;
} else if (channels[i].pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) { // float
channel_offset += 4;
} else {
assert(0);
}
}
sampleSize = channel_offset;
}
assert(sampleSize >= 2);
assert(static_cast<size_t>(
pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
sampleSize) == sample_data.size());
int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
//
// Alloc memory
//
//
// pixel data is stored as image[channels][pixel_samples]
//
{
tinyexr::tinyexr_uint64 data_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
deep_image->image[c][y] = static_cast<float *>(
malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
if (channels[c].pixel_type == 0) { // UINT
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
unsigned int ui;
unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
&sample_data.at(size_t(data_offset) + x * sizeof(int)));
tinyexr::cpy4(&ui, src_ptr);
deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
}
data_offset +=
sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
} else if (channels[c].pixel_type == 1) { // half
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
tinyexr::FP16 f16;
const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
&sample_data.at(size_t(data_offset) + x * sizeof(short)));
tinyexr::cpy2(&(f16.u), src_ptr);
tinyexr::FP32 f32 = half_to_float(f16);
deep_image->image[c][y][x] = f32.f;
}
data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
} else { // float
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
float f;
const float *src_ptr = reinterpret_cast<float *>(
&sample_data.at(size_t(data_offset) + x * sizeof(float)));
tinyexr::cpy4(&f, src_ptr);
deep_image->image[c][y][x] = f;
}
data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
}
}
}
} // y
deep_image->width = data_width;
deep_image->height = data_height;
deep_image->channel_names = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
}
deep_image->num_channels = num_channels;
return TINYEXR_SUCCESS;
}
void InitEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return;
}
exr_image->width = 0;
exr_image->height = 0;
exr_image->num_channels = 0;
exr_image->images = NULL;
exr_image->tiles = NULL;
exr_image->num_tiles = 0;
}
void FreeEXRErrorMessage(const char *msg) {
if (msg) {
free(reinterpret_cast<void *>(const_cast<char *>(msg)));
}
return;
}
void InitEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return;
}
memset(exr_header, 0, sizeof(EXRHeader));
}
int FreeEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->channels) {
free(exr_header->channels);
}
if (exr_header->pixel_types) {
free(exr_header->pixel_types);
}
if (exr_header->requested_pixel_types) {
free(exr_header->requested_pixel_types);
}
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
if (exr_header->custom_attributes[i].value) {
free(exr_header->custom_attributes[i].value);
}
}
if (exr_header->custom_attributes) {
free(exr_header->custom_attributes);
}
return TINYEXR_SUCCESS;
}
int FreeEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->images && exr_image->images[i]) {
free(exr_image->images[i]);
}
}
if (exr_image->images) {
free(exr_image->images);
}
if (exr_image->tiles) {
for (int tid = 0; tid < exr_image->num_tiles; tid++) {
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
free(exr_image->tiles[tid].images[i]);
}
}
if (exr_image->tiles[tid].images) {
free(exr_image->tiles[tid].images);
}
}
free(exr_image->tiles);
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_header == NULL || exr_version == NULL || filename == NULL) {
tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
err);
}
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
int *num_headers,
const EXRVersion *exr_version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
exr_version == NULL) {
// Invalid argument
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Data size too short", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
std::vector<tinyexr::HeaderInfo> infos;
for (;;) {
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
bool empty_header = false;
int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage(err_str, err);
return ret;
}
if (empty_header) {
marker += 1; // skip '\0'
break;
}
// `chunkCount` must exist in the header.
if (info.chunk_count == 0) {
tinyexr::SetErrorMessage(
"`chunkCount' attribute is not found in the header.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
infos.push_back(info);
// move to next header.
    marker += info.header_len;
    marker_size -= info.header_len;
}
// allocate memory for EXRHeader and create array of EXRHeader pointers.
(*exr_headers) =
static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
for (size_t i = 0; i < infos.size(); i++) {
EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
ConvertHeader(exr_header, infos[i]);
    // transfer `tiled` from version.
exr_header->tiled = exr_version->tiled;
(*exr_headers)[i] = exr_header;
}
(*num_headers) = static_cast<int>(infos.size());
return TINYEXR_SUCCESS;
}
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
filename == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRMultipartHeaderFromMemory(
exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
size_t size) {
if (version == NULL || memory == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory;
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
version->tiled = false;
version->long_name = false;
version->non_image = false;
version->multipart = false;
// Parse version header.
{
// must be 2
if (marker[0] != 2) {
return TINYEXR_ERROR_INVALID_EXR_VERSION;
}
version->version = 2;
if (marker[1] & 0x2) { // 9th bit
version->tiled = true;
}
if (marker[1] & 0x4) { // 10th bit
version->long_name = true;
}
if (marker[1] & 0x8) { // 11th bit
version->non_image = true; // (deep image)
}
if (marker[1] & 0x10) { // 12th bit
version->multipart = true;
}
}
return TINYEXR_SUCCESS;
}
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
if (filename == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t file_size;
// Compute size
fseek(fp, 0, SEEK_END);
file_size = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (file_size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
unsigned char buf[tinyexr::kEXRVersionSize];
size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
fclose(fp);
if (ret != tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromMemory()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
// compute total header size.
size_t total_header_size = 0;
for (unsigned int i = 0; i < num_parts; i++) {
if (exr_headers[i]->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
total_header_size += exr_headers[i]->header_len;
}
const char *marker = reinterpret_cast<const char *>(
memory + total_header_size + 4 +
4); // +8 for magic number and version header.
marker += 1; // Skip empty header.
// NOTE 1:
// In multipart image, There is 'part number' before chunk data.
// 4 byte : part number
// 4+ : chunk
//
// NOTE 2:
// EXR spec says 'part number' is 'unsigned long' but actually this is
// 'unsigned int(4 bytes)' in OpenEXR implementation...
// http://www.openexr.com/openexrfilelayout.pdf
// Load chunk offset table.
std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list;
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> offset_table(
static_cast<size_t>(exr_headers[i]->chunk_count));
for (size_t c = 0; c < offset_table.size(); c++) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, 8);
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
offset_table[c] = offset + 4; // +4 to skip 'part number'
marker += 8;
}
chunk_offset_table_list.push_back(offset_table);
}
// Decode image.
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> &offset_table =
chunk_offset_table_list[i];
// First check 'part number' is identitical to 'i'
for (size_t c = 0; c < offset_table.size(); c++) {
const unsigned char *part_number_addr =
memory + offset_table[c] - 4; // -4 to move to 'part number' field.
unsigned int part_no;
memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4
tinyexr::swap4(&part_no);
if (part_no != i) {
tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
std::string e;
int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table,
memory, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
return ret;
}
}
return TINYEXR_SUCCESS;
}
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts, const char *filename,
const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
(void)ret;
}
return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
&buf.at(0), filesize, err);
}
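/* Illustrative multipart usage sketch (not part of the library; "multi.exr"
   is a hypothetical path):

     EXRVersion version;
     if (ParseEXRVersionFromFile(&version, "multi.exr") != TINYEXR_SUCCESS ||
         !version.multipart) {
       return -1;
     }
     EXRHeader **headers = NULL;
     int num_parts = 0;
     const char *err = NULL;
     if (ParseEXRMultipartHeaderFromFile(&headers, &num_parts, &version,
                                         "multi.exr", &err) != TINYEXR_SUCCESS) {
       if (err) FreeEXRErrorMessage(err);
       return -1;
     }
     std::vector<EXRImage> images(static_cast<size_t>(num_parts));
     for (int i = 0; i < num_parts; i++) InitEXRImage(&images[i]);
     int ret = LoadEXRMultipartImageFromFile(
         images.data(), const_cast<const EXRHeader **>(headers),
         static_cast<unsigned int>(num_parts), "multi.exr", &err);
     // ... use images ..., then FreeEXRImage() and FreeEXRHeader() each part.
*/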
int SaveEXR(const float *data, int width, int height, int components,
const int save_as_fp16, const char *outfilename, const char **err) {
  if (components != 1 && components != 3 && components != 4) {
    std::stringstream ss;
    ss << "Unsupported component value : " << components << std::endl;
    tinyexr::SetErrorMessage(ss.str(), err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
EXRHeader header;
InitEXRHeader(&header);
if ((width < 16) && (height < 16)) {
// No compression for small image.
header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
} else {
header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
}
EXRImage image;
InitEXRImage(&image);
image.num_channels = components;
std::vector<float> images[4];
if (components == 1) {
images[0].resize(static_cast<size_t>(width * height));
memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
} else {
images[0].resize(static_cast<size_t>(width * height));
images[1].resize(static_cast<size_t>(width * height));
images[2].resize(static_cast<size_t>(width * height));
images[3].resize(static_cast<size_t>(width * height));
// Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
images[0][i] = data[static_cast<size_t>(components) * i + 0];
images[1][i] = data[static_cast<size_t>(components) * i + 1];
images[2][i] = data[static_cast<size_t>(components) * i + 2];
if (components == 4) {
images[3][i] = data[static_cast<size_t>(components) * i + 3];
}
}
}
float *image_ptr[4] = {0, 0, 0, 0};
if (components == 4) {
image_ptr[0] = &(images[3].at(0)); // A
image_ptr[1] = &(images[2].at(0)); // B
image_ptr[2] = &(images[1].at(0)); // G
image_ptr[3] = &(images[0].at(0)); // R
} else if (components == 3) {
image_ptr[0] = &(images[2].at(0)); // B
image_ptr[1] = &(images[1].at(0)); // G
image_ptr[2] = &(images[0].at(0)); // R
} else if (components == 1) {
image_ptr[0] = &(images[0].at(0)); // A
}
image.images = reinterpret_cast<unsigned char **>(image_ptr);
image.width = width;
image.height = height;
header.num_channels = components;
header.channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
  // Must be (A)BGR order, since most EXR viewers expect this channel order.
if (components == 4) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
strncpy_s(header.channels[1].name, "B", 255);
strncpy_s(header.channels[2].name, "G", 255);
strncpy_s(header.channels[3].name, "R", 255);
#else
strncpy(header.channels[0].name, "A", 255);
strncpy(header.channels[1].name, "B", 255);
strncpy(header.channels[2].name, "G", 255);
strncpy(header.channels[3].name, "R", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
header.channels[1].name[strlen("B")] = '\0';
header.channels[2].name[strlen("G")] = '\0';
header.channels[3].name[strlen("R")] = '\0';
} else if (components == 3) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "B", 255);
strncpy_s(header.channels[1].name, "G", 255);
strncpy_s(header.channels[2].name, "R", 255);
#else
strncpy(header.channels[0].name, "B", 255);
strncpy(header.channels[1].name, "G", 255);
strncpy(header.channels[2].name, "R", 255);
#endif
header.channels[0].name[strlen("B")] = '\0';
header.channels[1].name[strlen("G")] = '\0';
header.channels[2].name[strlen("R")] = '\0';
} else {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
#else
strncpy(header.channels[0].name, "A", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
}
header.pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
header.requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
for (int i = 0; i < header.num_channels; i++) {
header.pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image
if (save_as_fp16 > 0) {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format
} else {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e.
// no precision reduction)
}
}
  int ret = SaveEXRImageToFile(&image, &header, outfilename, err);
  // Free header allocations on both success and failure paths.
  free(header.channels);
  free(header.pixel_types);
  free(header.requested_pixel_types);
  return ret;
}
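/* Illustrative call (not part of the library; "out.exr" is a hypothetical
   path): write a 2x2 RGB image as half-float EXR. SaveEXR expects
   interleaved RGB(A) float data.

     const float rgb[2 * 2 * 3] = {0.f};  // all-black test image
     const char *err = NULL;
     int ret = SaveEXR(rgb, 2, 2, 3, 1, "out.exr", &err);  // 1 = save as fp16
     if (ret != TINYEXR_SUCCESS && err) {
       FreeEXRErrorMessage(err);
     }
*/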
#ifdef __clang__
// zero-as-null-pointer-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEIFNED
#endif // TINYEXR_IMPLEMENTATION
|
naive_math_impl.h | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cmath>
#include <cstdlib>  // malloc/free
#include <cstring>  // memset/memcpy
template <typename type>
static void basic_trans_mat_to_c4(const type* input,
type* output,
const int ldin,
const int M,
const int K,
bool pack_k) {
const int m_round = (M + 3) / 4 * 4;
int k_round = (K + 3) / 4 * 4;
if (!pack_k) {
k_round = K;
}
const int m_loop = m_round / 4;
type* zero_buf = new type[K];
memset(zero_buf, 0, K * sizeof(type));
for (int i = 0; i < m_loop; ++i) {
const type* in0 = input + i * 4 * ldin;
const type* in1 = in0 + ldin;
const type* in2 = in1 + ldin;
const type* in3 = in2 + ldin;
if (4 * (i + 1) - M > 0) {
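      // The missing `break`s below are intentional fall-through: each case
      // redirects one more out-of-range trailing row to the zero buffer.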
switch (4 * (i + 1) - M) {
case 3:
in1 = zero_buf;
case 2:
in2 = zero_buf;
case 1:
in3 = zero_buf;
default:
break;
}
}
for (int j = 0; j < K; ++j) {
*output++ = *in0++;
*output++ = *in1++;
*output++ = *in2++;
*output++ = *in3++;
}
for (int j = K; j < k_round; ++j) {
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
}
}
delete[] zero_buf;
}
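// Worked example (illustrative): M = 2, K = 3, ldin = 3, pack_k = false.
// With input rows r0 = {a, b, c} and r1 = {d, e, f}, one block of four rows
// is emitted (rows 2 and 3 come from the zero buffer), interleaved one
// column at a time:
//   output = { a, d, 0, 0,   b, e, 0, 0,   c, f, 0, 0 }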
template <typename type>
static void basic_trans_mat_to_c8(const type* input,
type* output,
const int ldin,
const int M,
const int K,
bool pack_k) {
const int m_round = (M + 7) / 8 * 8;
int k_round = (K + 7) / 8 * 8;
if (!pack_k) {
k_round = K;
}
const int m_loop = m_round / 8;
  type* zero_buf = new type[K];
  memset(zero_buf, 0, K * sizeof(type));
for (int i = 0; i < m_loop; ++i) {
const type* in0 = input + i * 8 * ldin;
const type* in1 = in0 + ldin;
const type* in2 = in1 + ldin;
const type* in3 = in2 + ldin;
const type* in4 = in3 + ldin;
const type* in5 = in4 + ldin;
const type* in6 = in5 + ldin;
const type* in7 = in6 + ldin;
if (8 * (i + 1) - M > 0) {
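      // Intentional fall-through, as in the c4 variant: out-of-range trailing
      // rows read from the zero buffer.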
switch (8 * (i + 1) - M) {
case 7:
in1 = zero_buf;
case 6:
in2 = zero_buf;
case 5:
in3 = zero_buf;
case 4:
in4 = zero_buf;
case 3:
in5 = zero_buf;
case 2:
in6 = zero_buf;
case 1:
in7 = zero_buf;
default:
break;
}
}
for (int j = 0; j < K; ++j) {
*output++ = *in0++;
*output++ = *in1++;
*output++ = *in2++;
*output++ = *in3++;
*output++ = *in4++;
*output++ = *in5++;
*output++ = *in6++;
*output++ = *in7++;
}
for (int j = K; j < k_round; ++j) {
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
}
}
  delete[] zero_buf;
}
template <typename type, typename type2>
static void basic_gemm_c4(bool trans_a,
bool trans_b,
int m,
int n,
int k,
type2 alpha,
const type* a,
int lda,
const type* b,
int ldb,
type2 beta,
type2* c,
int ldc,
const type2* bias,
bool flag_bias = false,
bool flag_relu = false) {
type2* tmp_c = reinterpret_cast<type2*>(malloc(m * ldc * sizeof(type2)));
memset(tmp_c, 0, m * ldc * sizeof(type2));
#pragma omp parallel for
for (int i = 0; i < m; ++i) {
auto bias_data = static_cast<type2>(0);
if (flag_bias) {
bias_data = bias[i];
}
for (int j = 0; j < n; ++j) {
auto sum = static_cast<type2>(0);
for (int l = 0; l < k; ++l) {
type av;
type bv;
if (trans_a) {
av = a[l * lda + i];
} else {
av = a[i * lda + l];
}
if (trans_b) {
bv = b[j * ldb + l];
} else {
bv = b[l * ldb + j];
}
sum += av * bv;
}
type2 tmp = alpha * sum + beta * tmp_c[i * ldc + j] + bias_data;
if (flag_relu) {
tmp_c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
} else {
tmp_c[i * ldc + j] = tmp;
}
}
}
//! trans c to c4
basic_trans_mat_to_c4(tmp_c, c, ldc, m, n, false);
free(tmp_c);
}
template <typename type, typename type2>
static void basic_gemm_c8(bool trans_a,
bool trans_b,
int m,
int n,
int k,
type2 alpha,
const type* a,
int lda,
const type* b,
int ldb,
type2 beta,
type2* c,
int ldc,
const type2* bias,
bool flag_bias = false,
bool flag_relu = false) {
type2* tmp_c = reinterpret_cast<type2*>(malloc(m * ldc * sizeof(type2)));
memset(tmp_c, 0, m * ldc * sizeof(type2));
#pragma omp parallel for
for (int i = 0; i < m; ++i) {
auto bias_data = static_cast<type2>(0);
if (flag_bias) {
bias_data = bias[i];
}
for (int j = 0; j < n; ++j) {
auto sum = static_cast<type2>(0);
for (int l = 0; l < k; ++l) {
type av;
type bv;
if (trans_a) {
av = a[l * lda + i];
} else {
av = a[i * lda + l];
}
if (trans_b) {
bv = b[j * ldb + l];
} else {
bv = b[l * ldb + j];
}
sum += av * bv;
}
type2 tmp = alpha * sum + beta * tmp_c[i * ldc + j] + bias_data;
if (flag_relu) {
tmp_c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
} else {
tmp_c[i * ldc + j] = tmp;
}
}
}
  //! trans c to c8
  basic_trans_mat_to_c8(tmp_c, c, ldc, m, n, false);
free(tmp_c);
}
template <typename type, typename type2>
static void basic_gemm(bool trans_a,
bool trans_b,
int m,
int n,
int k,
type2 alpha,
const type* a,
int lda,
const type* b,
int ldb,
type2 beta,
type2* c,
int ldc,
const type2* bias,
bool flag_bias = false,
bool flag_relu = false) {
#pragma omp parallel for
for (int i = 0; i < m; ++i) {
auto bias_data = static_cast<type2>(0);
if (flag_bias) {
bias_data = bias[i];
}
for (int j = 0; j < n; ++j) {
auto sum = static_cast<type2>(0);
for (int l = 0; l < k; ++l) {
type av;
type bv;
if (trans_a) {
av = a[l * lda + i];
} else {
av = a[i * lda + l];
}
if (trans_b) {
bv = b[j * ldb + l];
} else {
bv = b[l * ldb + j];
}
sum += av * bv;
}
type2 tmp = alpha * sum + beta * c[i * ldc + j] + bias_data;
if (flag_relu) {
c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
} else {
c[i * ldc + j] = tmp;
}
}
}
}
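/* Minimal usage sketch (illustrative): C = A * B for 2x2 row-major float
   matrices, no transpose, no bias, no ReLU. With beta = 0 the initial
   contents of c do not matter.

     float a[4] = {1.f, 2.f, 3.f, 4.f};
     float b[4] = {5.f, 6.f, 7.f, 8.f};
     float c[4] = {0.f};
     basic_gemm<float, float>(false, false, 2, 2, 2, 1.f, a, 2, b, 2, 0.f,
                              c, 2, nullptr, false, false);
     // c is now {19, 22, 43, 50}
*/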
template <typename type, typename type2>
static void basic_gemv(int m,
int k,
const type* a,
const type* b,
const type2* bias,
type2* c,
type2 alpha,
type2 beta,
bool trans_a = false,
bool flag_bias = false,
                       int flag_act = 0,
float six = 6.f,
float leakey_relu_alpha = 1.f) {
#pragma omp parallel for
for (int i = 0; i < m; ++i) {
auto bias_data = static_cast<type2>(0);
if (flag_bias) {
bias_data = bias[i];
}
auto sum = static_cast<type2>(0);
for (int j = 0; j < k; ++j) {
type av;
if (trans_a) {
av = a[j * m + i];
} else {
av = a[i * k + j];
}
sum += av * b[j];
}
type2 tmp = alpha * sum + beta * c[i] + bias_data;
if (flag_act > 0) {
if (flag_act == 1) { // relu
c[i] = tmp > (type2)0 ? tmp : (type2)0;
      } else if (flag_act == 2) {  // relu6
        c[i] = tmp > (type2)0 ? tmp : (type2)0;
        c[i] = c[i] < six ? c[i] : six;  // clamp at six
      } else if (flag_act == 4) {  // leaky relu
        c[i] = tmp < (type2)0 ? (type2)(tmp * leakey_relu_alpha) : tmp;
}
} else {
c[i] = tmp;
}
}
}
/**
* \brief basic direct convolution function
*/
//! for float, Dtype1 and Dtype2 are float
//! for int8, Dtype1 is char and Dtype2 is int
template <typename Dtype1, typename Dtype2>
static void conv_basic(const Dtype1* din,
Dtype2* dout,
int num,
int chout,
int hout,
int wout,
int chin,
int hin,
int win,
const Dtype1* weights,
const Dtype2* bias,
int group,
int kernel_w,
int kernel_h,
int stride_w,
int stride_h,
int dila_w,
int dila_h,
int pad_w,
int pad_h,
bool flag_bias,
int act_type,
float six = 6.f,
float scale = 1.f) {
Dtype2 beta = 0;
auto src_data = din;
auto dst_data_ref = dout;
auto weights_data = weights;
auto with_bias = flag_bias;
auto bias_data = bias;
int in_num = num;
int out_channels = chout;
int out_h = hout;
int out_w = wout;
int in_channel = chin;
int in_h = hin;
int in_w = win;
int out_c_group = out_channels / group;
int in_c_group = in_channel / group;
for (int n = 0; n < in_num; ++n) {
#pragma omp parallel for collapse(4)
for (int g = 0; g < group; ++g) {
for (int oc = 0; oc < out_c_group; ++oc) {
for (int oh = 0; oh < out_h; ++oh) {
for (int ow = 0; ow < out_w; ++ow) {
int out_idx = n * group * out_c_group * out_h * out_w +
g * out_c_group * out_h * out_w + oc * out_h * out_w +
oh * out_w + ow;
Dtype2 bias_d = with_bias ? (bias_data[g * out_c_group + oc]) : 0;
dst_data_ref[out_idx] = bias_d; // + dst_data_ref[out_idx] * beta;
for (int ic = 0; ic < in_c_group; ++ic) {
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
int iw = ow * stride_w - pad_w + kw * (dila_w);
int ih = oh * stride_h - pad_h + kh * (dila_h);
if (iw < 0 || iw >= in_w) continue;
if (ih < 0 || ih >= in_h) continue;
int iidx = n * in_channel * in_h * in_w +
g * in_c_group * in_h * in_w + ic * in_h * in_w +
ih * in_w + iw;
int widx =
g * out_c_group * in_c_group * kernel_h * kernel_w +
oc * in_c_group * kernel_h * kernel_w +
ic * kernel_h * kernel_w + kh * kernel_w + kw;
dst_data_ref[out_idx] += src_data[iidx] * weights_data[widx];
}
}
}
if (act_type > 0) {
// 1-relu 2-relu6 4-leakyrelu
if (act_type == 1) {
dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
? dst_data_ref[out_idx]
: (Dtype2)0;
} else if (act_type == 2) {
dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
? dst_data_ref[out_idx]
: (Dtype2)0;
dst_data_ref[out_idx] = dst_data_ref[out_idx] < (Dtype2)six
? dst_data_ref[out_idx]
: (Dtype2)six;
} else if (act_type == 4) {
dst_data_ref[out_idx] =
dst_data_ref[out_idx] > (Dtype2)0
? dst_data_ref[out_idx]
: (Dtype2)(dst_data_ref[out_idx] * scale);
} else {
printf("this act type: %d does not support \n", act_type);
}
}
}
}
}
}
}
}
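// Layout note (inferred from the indexing above): din/dout use NCHW order,
// weights are [group][out_c_group][in_c_group][kh][kw] (OIHW within each
// group), and bias, when present, holds one value per output channel.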
template <typename Dtype>
static void fill_bias_relu(Dtype* tensor,
const Dtype* bias,
int channel,
int channel_size,
bool flag_bias,
bool flag_relu) {
Dtype* data = tensor;
for (int j = 0; j < channel; ++j) {
Dtype bias_c = flag_bias ? bias[j] : 0;
for (int i = 0; i < channel_size; i++) {
data[i] += bias_c;
if (flag_relu) {
        data[i] = data[i] > 0 ? data[i] : (Dtype)0;
}
}
data += channel_size;
}
}
template <typename Dtype>
static void do_relu(Dtype* tensor, int size) {
for (int j = 0; j < size; ++j) {
tensor[j] = tensor[j] > 0 ? tensor[j] : (Dtype)0;
}
}
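// The single unsigned comparison below is equivalent to (a >= 0 && a < b)
// for non-negative b: a negative `a` wraps to a large unsigned value and
// fails the `< b` test.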
inline bool is_a_ge_zero_and_a_lt_b(int a, int b) {
return static_cast<unsigned>(a) < static_cast<unsigned>(b);
}
template <typename Dtype>
static void col2im(const Dtype* data_col,
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h0,
const int pad_h1,
const int pad_w0,
const int pad_w1,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
Dtype* data_im) {
memset(data_im, 0, height * width * channels * sizeof(Dtype));
const int output_h =
(height + pad_h0 + pad_h1 - (dilation_h * (kernel_h - 1) + 1)) /
stride_h +
1;
const int output_w =
(width + pad_w0 + pad_w1 - (dilation_w * (kernel_w - 1) + 1)) / stride_w +
1;
const int channel_size = height * width;
for (int channel = channels; channel--; data_im += channel_size) {
for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
int input_row = -pad_h0 + kernel_row * dilation_h;
for (int output_rows = output_h; output_rows; output_rows--) {
if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
data_col += output_w;
} else {
int input_col = -pad_w0 + kernel_col * dilation_w;
for (int output_col = output_w; output_col; output_col--) {
if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
data_im[input_row * width + input_col] += *data_col;
}
data_col++;
input_col += stride_w;
}
}
input_row += stride_h;
}
}
}
}
}
//! for float, Dtype1 and Dtype2 are float
//! for int8, Dtype1 is char and Dtype2 is int
template <typename Dtype1, typename Dtype2>
void deconv_basic(const Dtype1* din,
Dtype2* dout,
int num,
int chout,
int hout,
int wout,
int chin,
int hin,
int win,
const Dtype1* weights,
const Dtype2* bias,
int group,
int kernel_w,
int kernel_h,
int stride_w,
int stride_h,
int dila_w,
int dila_h,
int pad_w0,
int pad_w1,
int pad_h0,
int pad_h1,
bool flag_bias,
bool flag_relu) {
int m = chout * kernel_w * kernel_h / group;
int n = hin * win;
int k = chin / group;
int group_size_in = win * hin * chin / group;
int group_size_coldata = m * n;
int group_size_weights = chin * chout * kernel_w * kernel_h / (group * group);
bool flag_1x1s1p1 = (kernel_w == 1) && (kernel_h == 1) && (stride_h == 1) &&
(stride_w == 1) && (pad_w0 == 0) && (pad_h0 == 0) &&
(pad_w1 == 0) && (pad_h1 == 0) && (dila_w == 1) &&
(dila_h == 1);
  Dtype2* workspace_ptr =
      static_cast<Dtype2*>(malloc(sizeof(Dtype2) * m * n * group));
for (int i = 0; i < num; ++i) {
const Dtype1* din_batch = din + i * chin * hin * win;
Dtype2* dout_batch = dout + i * chout * hout * wout;
Dtype2* col_data = workspace_ptr;
if (flag_1x1s1p1) {
col_data = dout_batch;
}
memset(col_data, 0, sizeof(Dtype2) * group_size_coldata * group);
for (int g = 0; g < group; ++g) {
const Dtype1* din_group = din_batch + g * group_size_in;
const Dtype1* weights_group = weights + g * group_size_weights;
Dtype2* coldata_group = col_data + g * group_size_coldata;
basic_gemm<Dtype1, Dtype2>(true,
false,
m,
n,
k,
1,
weights_group,
m,
din_group,
n,
0,
coldata_group,
n,
nullptr,
false,
(!flag_bias && flag_relu));
}
if (!flag_1x1s1p1) {
col2im(col_data,
chout,
hout,
wout,
kernel_h,
kernel_w,
pad_h0,
pad_h1,
pad_w0,
pad_w1,
stride_h,
stride_w,
dila_h,
dila_w,
dout_batch);
}
//! add bias
if (flag_bias) {
fill_bias_relu(
dout_batch, bias, chout, wout * hout, flag_bias, flag_relu);
}
}
free(workspace_ptr);
}
float deformable_bilinear(const float* bottom_data,
const int data_width,
const int height,
const int width,
float h,
float w) {
  int h_low = static_cast<int>(floor(h));
  int w_low = static_cast<int>(floor(w));
  int h_high;
  int w_high;
  if (h_low >= height - 1) {
    h_high = h_low = height - 1;
    h = static_cast<float>(h_low);
  } else {
    h_high = h_low + 1;
  }
  if (w_low >= width - 1) {
    w_high = w_low = width - 1;
    w = static_cast<float>(w_low);
  } else {
    w_high = w_low + 1;
  }
float lh = h - h_low;
float lw = w - w_low;
float hh = 1 - lh;
float hw = 1 - lw;
float v1 = bottom_data[h_low * data_width + w_low];
float v2 = bottom_data[h_low * data_width + w_high];
float v3 = bottom_data[h_high * data_width + w_low];
float v4 = bottom_data[h_high * data_width + w_high];
float w1 = hh * hw;
float w2 = hh * lw;
float w3 = lh * hw;
float w4 = lh * lw;
float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
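// Worked example (illustrative): sampling a 2x2 patch {0, 1; 2, 3} at
// (h, w) = (0.5, 0.5) gives w1 = w2 = w3 = w4 = 0.25, so the function
// returns (0 + 1 + 2 + 3) / 4 = 1.5.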
//! for float, Dtype1 and Dtype2 are float
//! for int8, Dtype1 is char and Dtype2 is int
template <typename Dtype1, typename Dtype2>
void deformable_conv_basic(const Dtype1* in_data,
const float* offset_data,
const float* mask_data,
Dtype2* out_data,
int num,
int chout,
int hout,
int wout,
int chin,
int hin,
int win,
const Dtype1* weights,
const Dtype2* bias,
int group,
int kernel_w,
int kernel_h,
int stride_w,
int stride_h,
int dila_w,
int dila_h,
int pad_w,
int pad_h,
bool flag_bias,
bool flag_relu,
bool modulated) {
int out_c_group = chout / group;
int in_c_group = chin / group;
int in_size = hin * win;
int out_size = hout * wout;
int c_in_size = chin * in_size;
int c_out_size = chout * out_size;
int kernel_size = kernel_w * kernel_h;
for (int n = 0; n < num; n++) {
#pragma omp parallel for collapse(4)
for (int g = 0; g < group; ++g) {
for (int oc = 0; oc < out_c_group; ++oc) {
for (int oh = 0; oh < hout; oh++) {
for (int ow = 0; ow < wout; ow++) {
int out_idx = n * c_out_size + g * out_c_group * out_size +
oc * out_size + oh * wout + ow;
Dtype2 bias_d = flag_bias ? bias[g * out_c_group + oc] : 0;
out_data[out_idx] = bias_d + out_data[out_idx];
for (int ic = 0; ic < in_c_group; ++ic) {
for (int fh = 0; fh < kernel_h; fh++) {
for (int fw = 0; fw < kernel_w; fw++) {
const float* offset_data_ptr =
offset_data + n * group * 2 * kernel_size * out_size +
g * 2 * kernel_size * out_size;
const int data_offset_h_ptr =
((2 * (fh * kernel_w + fw)) * hout + oh) * wout + ow;
const int data_offset_w_ptr =
((2 * (fh * kernel_w + fw) + 1) * hout + oh) * wout + ow;
const float offset_h = offset_data_ptr[data_offset_h_ptr];
const float offset_w = offset_data_ptr[data_offset_w_ptr];
                const float iw =
                    ow * stride_w - pad_w + fw * dila_w + offset_w;
                const float ih =
                    oh * stride_h - pad_h + fh * dila_h + offset_h;
if (ih >= 0 && ih < hin && iw >= 0 && iw < win) {
                  const float map_h = fh * dila_h + offset_h;
                  const float map_w = fw * dila_w + offset_w;
const int cur_height = hin - (oh * stride_h - pad_h);
const int cur_width = win - (ow * stride_w - pad_w);
const float* in_data_offset =
in_data + n * c_in_size +
(g * in_c_group + ic) * in_size +
(oh * stride_h - pad_h) * win + (ow * stride_w - pad_w);
float val = deformable_bilinear(in_data_offset,
win,
cur_height,
cur_width,
map_h,
map_w);
if (modulated) {
// use mask
const float* mask_ptr =
mask_data + n * group * kernel_size * out_size +
g * kernel_size * out_size +
(fh * kernel_w + fw) * hout * wout + oh * wout + ow;
val *= mask_ptr[0];
}
int widx = g * out_c_group * in_c_group * kernel_size +
oc * in_c_group * kernel_size +
ic * kernel_size + fh * kernel_w + fw;
out_data[out_idx] += val * weights[widx];
}
}
}
}
if (flag_relu) {
out_data[out_idx] = out_data[out_idx] > 0 ? out_data[out_idx] : 0;
}
}
}
}
}
}
}
|
oned_csr.c | /* Copyright (C) 2010-2011 The Trustees of Indiana University. */
/* */
/* Use, modification and distribution is subject to the Boost Software */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt) */
/* */
/* Authors: Jeremiah Willcock */
/* Andrew Lumsdaine */
#include "common.h"
#include "oned_csr.h"
#include "redistribute.h"
#include <mpi.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
typedef struct temp_csr_graph {
  size_t* restrict rowstarts; /* rowstarts[i] is the offset in column[] where row i begins */
  int64_t* restrict column; /* column[e] is the target vertex (column index) of edge e */
size_t nlocalverts;
size_t nlocaledges;
size_t nlocaledges_allocated; /* Actual size of column */
int lg_nglobalverts;
} temp_csr_graph;
static void make_empty_csr(temp_csr_graph* restrict const outg /* All fields NULL or 0 */) {
outg->rowstarts = (size_t*)xcalloc(1, sizeof(size_t));
outg->column = NULL; /* Realloc can enlarge a NULL pointer */
outg->nlocalverts = outg->nlocaledges = outg->nlocaledges_allocated = 0;
outg->lg_nglobalverts = -1;
}
static void make_csr(const packed_edge* restrict const inbuf, temp_csr_graph* restrict const outg /* Must have memory and nlocalverts/nlocaledges filled in */) {
size_t nrows = outg->nlocalverts;
size_t inbuf_size = outg->nlocaledges;
size_t* temp = (size_t*)xmalloc(nrows * sizeof(size_t));
size_t* restrict rowstarts = outg->rowstarts;
int64_t* restrict column = outg->column;
{
size_t* restrict counts = temp;
memset(counts, 0, nrows * sizeof(size_t));
ptrdiff_t i;
#pragma omp parallel for
for (i = 0; i < (ptrdiff_t)inbuf_size; ++i) {
assert ((size_t)(VERTEX_LOCAL(get_v0_from_edge(&inbuf[i]))) < nrows);
#pragma omp atomic
++counts[VERTEX_LOCAL(get_v0_from_edge(&inbuf[i]))];
}
rowstarts[0] = 0;
    for (i = 0; i < (ptrdiff_t)nrows; ++i) {
rowstarts[i + 1] = rowstarts[i] + counts[i];
}
}
{
size_t* restrict inserts = temp;
memcpy(inserts, rowstarts, nrows * sizeof(size_t));
ptrdiff_t i;
#pragma omp parallel for
for (i = 0; i < (ptrdiff_t)inbuf_size; ++i) {
int64_t v0 = get_v0_from_edge(&inbuf[i]);
int64_t v1 = get_v1_from_edge(&inbuf[i]);
assert ((size_t)(VERTEX_LOCAL(v0)) < nrows);
size_t pos = __sync_fetch_and_add(&inserts[VERTEX_LOCAL(v0)], 1);
assert (pos < inbuf_size);
column[pos] = v1;
}
}
free(temp);
}
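/* Worked example (illustrative, on a single rank where VERTEX_LOCAL is the
 * identity): with nrows = 3 and local edges (0,1), (0,2), (1,0), the count
 * pass yields rowstarts = {0, 2, 3, 3} and the insert pass yields
 * column = {1, 2, 0}. The order of entries within a row is nondeterministic,
 * since insert positions come from a parallel fetch-and-add. */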
/* Do merge: b = b union a */
static void merge_csr(temp_csr_graph* restrict const b,
const temp_csr_graph* restrict const a) {
size_t a_nlocalverts = a->nlocalverts;
size_t b_nlocalverts = b->nlocalverts;
size_t a_nlocaledges = a->nlocaledges;
size_t b_nlocaledges = b->nlocaledges;
if (a->nlocalverts > b->nlocalverts) {
ptrdiff_t old_b_nlocalverts = b_nlocalverts, i;
b->rowstarts = (size_t*)xrealloc(b->rowstarts, (a_nlocalverts + 1) * sizeof(size_t));
b_nlocalverts = b->nlocalverts = a->nlocalverts;
#pragma omp parallel for
for (i = old_b_nlocalverts; i < b_nlocalverts; ++i) {
b->rowstarts[i + 1] = b_nlocaledges;
}
b->lg_nglobalverts = a->lg_nglobalverts;
}
if (b_nlocaledges + a_nlocaledges > b->nlocaledges_allocated) {
size_t new_alloc = b_nlocaledges + a_nlocaledges + (1 << 16);
b->nlocaledges_allocated = new_alloc;
b->column = (int64_t*)xrealloc(b->column, new_alloc * sizeof(int64_t));
}
memmove(&b->column[b->rowstarts[a_nlocalverts] + a_nlocaledges],
&b->column[b->rowstarts[a_nlocalverts]],
(b_nlocaledges - b->rowstarts[a_nlocalverts]) * sizeof(int64_t));
ptrdiff_t i_plus_1;
for (i_plus_1 = a_nlocalverts; i_plus_1 > 0; --i_plus_1) {
ptrdiff_t i = i_plus_1 - 1;
memmove(&b->column[b->rowstarts[i] + a->rowstarts[i]],
&b->column[b->rowstarts[i]],
(b->rowstarts[i + 1] - b->rowstarts[i]) * sizeof(int64_t));
memcpy(&b->column[b->rowstarts[i + 1] + a->rowstarts[i]],
&a->column[a->rowstarts[i]],
(a->rowstarts[i + 1] - a->rowstarts[i]) * sizeof(int64_t));
}
b_nlocaledges = b->nlocaledges = b_nlocaledges + a_nlocaledges;
ptrdiff_t i;
#pragma omp parallel for
for (i = 0; i <= a_nlocalverts; ++i) {
b->rowstarts[i] += a->rowstarts[i];
}
#pragma omp parallel for if(a_nlocalverts != b_nlocalverts)
for (i = a_nlocalverts + 1; i <= b_nlocalverts; ++i) {
b->rowstarts[i] += a_nlocaledges;
}
}
#define CONV1D_FUNCNAME \
convert_graph_to_oned_csr_helper
#define CONV1D_EXTRA_PARAMS \
oned_csr_graph* const g
#define CONV1D_DECLARE_AND_INIT_GRAPH_SO_FAR \
temp_csr_graph graph_so_far = {NULL, NULL, 0, 0}; \
make_empty_csr(&graph_so_far);
#define CONV1D_CALL_ON_EDGES(V0, V1, LG_NGLOBALVERTS_SO_FAR, CONT) \
CONT(VERTEX_OWNER((V0)), CONV1D_WRITE_EDGE_NORMAL) \
CONT(VERTEX_OWNER((V1)), CONV1D_WRITE_EDGE_FLIPPED)
#define CONV1D_WRITE_EDGE_NORMAL(BUF, V0, V1) \
write_edge(BUF, V0, V1);
#define CONV1D_WRITE_EDGE_FLIPPED(BUF, V0, V1) \
write_edge(BUF, V1, V0);
#define CONV1D_EDGE_BUFFER_TYPE \
packed_edge
#define CONV1D_EDGE_BUFFER_MPI_TYPE \
packed_edge_mpi_type
#define CONV1D_PRECOMPRESS_INCOMING_DATA(LG_NGLOBALVERTS_SO_FAR, EDGES_TO_RECV, EDGES_RECEIVED_THIS_BLOCK) \
size_t nlocalverts_so_far = (size_t)DIV_SIZE((UINT64_C(1) << (LG_NGLOBALVERTS_SO_FAR)) / ulong_bits_squared + size - 1) * ulong_bits_squared; \
temp_csr_graph t = { \
/* rowstarts */ (size_t*)xmalloc((size_t)(nlocalverts_so_far + 1) * sizeof(size_t)), \
/* column */ (int64_t*)xmalloc((size_t)(EDGES_RECEIVED_THIS_BLOCK) * sizeof(int64_t)), \
/* nlocalverts */ (size_t)(nlocalverts_so_far), \
/* nlocaledges */ (size_t)(EDGES_RECEIVED_THIS_BLOCK), \
/* nlocaledges_allocated */ (size_t)(EDGES_RECEIVED_THIS_BLOCK), \
/* lg_nglobalverts */ (int)(LG_NGLOBALVERTS_SO_FAR) \
}; \
make_csr((EDGES_TO_RECV), &t);
#define CONV1D_MERGE_INTO_GRAPH_SO_FAR \
size_t new_alloc = graph_so_far.nlocaledges + edges_received_this_block * (block_count - ITERATE_TUPLE_GRAPH_BLOCK_NUMBER); \
if (new_alloc > graph_so_far.nlocaledges_allocated) { \
size_t new_alloc_real = new_alloc + (1 << 16); \
graph_so_far.nlocaledges_allocated = new_alloc_real; \
graph_so_far.column = (int64_t*)xrealloc(graph_so_far.column, new_alloc_real * sizeof(int64_t)); \
} \
merge_csr(&graph_so_far, &t);
#define CONV1D_FREE_PRECOMPRESSED_DATA \
free(t.rowstarts); \
free(t.column);
#define CONV1D_BUILD_FINAL_DATA_STRUCTURE_FROM_GRAPH_SO_FAR \
g->nlocaledges = graph_so_far.nlocaledges; \
g->rowstarts = graph_so_far.rowstarts; \
g->column = (int64_t*)xrealloc(graph_so_far.column, (size_t)g->nlocaledges * sizeof(int64_t)); \
int64_t nlocalverts = (int64_t)(graph_so_far.nlocalverts); \
g->nlocalverts = (size_t)nlocalverts; \
MPI_Allreduce(&nlocalverts, &g->max_nlocalverts, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD); \
g->lg_nglobalverts = graph_so_far.lg_nglobalverts; \
g->nglobalverts = INT64_C(1) << graph_so_far.lg_nglobalverts;
#define CONV1D_CLEAR_GRAPH_SO_FAR \
free(graph_so_far.rowstarts); graph_so_far.rowstarts = NULL; \
free(graph_so_far.column); graph_so_far.column = NULL; \
graph_so_far.nlocalverts = graph_so_far.nlocaledges = graph_so_far.nlocaledges_allocated = 0;
static MAKE_REDISTRIBUTE_FUNC(CONV1D_FUNCNAME, CONV1D_EXTRA_PARAMS, CONV1D_DECLARE_AND_INIT_GRAPH_SO_FAR, CONV1D_CALL_ON_EDGES, CONV1D_EDGE_BUFFER_TYPE, CONV1D_EDGE_BUFFER_MPI_TYPE, CONV1D_PRECOMPRESS_INCOMING_DATA, CONV1D_MERGE_INTO_GRAPH_SO_FAR, CONV1D_FREE_PRECOMPRESSED_DATA, CONV1D_BUILD_FINAL_DATA_STRUCTURE_FROM_GRAPH_SO_FAR, CONV1D_CLEAR_GRAPH_SO_FAR)
void convert_graph_to_oned_csr(const tuple_graph* const tg, oned_csr_graph* const g) {
  g->tg = tg;
g->nlocaledges = 0;
convert_graph_to_oned_csr_helper(tg, g);
}
void free_oned_csr_graph(oned_csr_graph* const g) {
if (g->rowstarts != NULL) {free(g->rowstarts); g->rowstarts = NULL;}
if (g->column != NULL) {free(g->column); g->column = NULL;}
}
|
GB_binop__max_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__max_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__max_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__max_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_uint8)
// A*D function (colscale): GB (_AxD__max_uint8)
// D*A function (rowscale): GB (_DxB__max_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__max_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__max_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_uint8)
// C=scalar+B GB (_bind1st__max_uint8)
// C=scalar+B' GB (_bind1st_tran__max_uint8)
// C=A+scalar GB (_bind2nd__max_uint8)
// C=A'+scalar GB (_bind2nd_tran__max_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
    0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_UINT8 || GxB_NO_MAX_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__max_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__max_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__max_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__max_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__max_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IMAX (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__max_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IMAX (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__max_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
9025.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "correlation.h"
/* Array initialization. */
static
void init_array (int m,
int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
int i, j;
*float_n = 1.2;
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
int i, j;
for (i = 0; i < m; i++)
for (j = 0; j < m; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
if ((i * m + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_correlation(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m),
DATA_TYPE POLYBENCH_1D(stddev,M,m))
{
int i, j, j1, j2;
DATA_TYPE eps = 0.1f;
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#pragma scop
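  /* NOTE: the `#P11` and `#p` tokens in the pragmas below appear to be
     placeholder markers for a source-to-source autotuning tool; the file
     will not compile until they are replaced with concrete clauses. */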
/* Determine mean of column vectors of input data matrix */
#pragma omp parallel private(i, j, j2) num_threads(#P11)
{
#pragma omp target teams distribute #p #p
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
/* Determine standard deviations of column vectors of data matrix. */
#pragma omp target teams distribute #p #p
for (j = 0; j < _PB_M; j++)
{
stddev[j] = 0.0;
for (i = 0; i < _PB_N; i++)
stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]);
stddev[j] /= float_n;
stddev[j] = sqrt_of_array_cell(stddev, j);
/* The following in an inelegant but usual way to handle
near-zero std. dev. values, which below would cause a zero-
divide. */
stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j];
}
/* Center and reduce the column vectors. */
#pragma omp target teams distribute #p #p
for (i = 0; i < _PB_N; i++)
{
#pragma omp target teams distribute #p #p
for (j = 0; j < _PB_M; j++)
{
data[i][j] -= mean[j];
data[i][j] /= sqrt(float_n) * stddev[j];
}
}
/* Calculate the m * m correlation matrix. */
#pragma omp target teams distribute #p #p
for (j1 = 0; j1 < _PB_M-1; j1++)
{
symmat[j1][j1] = 1.0;
for (j2 = j1+1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
symmat[j1][j2] += (data[i][j1] * data[i][j2]);
symmat[j2][j1] = symmat[j1][j2];
}
}
}
#pragma endscop
symmat[_PB_M-1][_PB_M-1] = 1.0;
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_correlation (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean),
POLYBENCH_ARRAY(stddev));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
POLYBENCH_FREE_ARRAY(stddev);
return 0;
}
|
facorialSum.c | #include <stdio.h>
#include <omp.h>
int factorial(int n){
int f = 1, i;
#pragma omp parallel for reduction(*:f)
for(i = 1; i <= n; i++){
f *= i;
}
return f;
}
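// Note (illustrative): factorial() accumulates into a plain int, and main()
// seeds sum = 1 to account for 1!. Both overflow once n exceeds 12, since
// 13! = 6227020800 > INT_MAX; a 64-bit accumulator would be needed beyond that.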
int main(){
int n, i, sum = 1;
omp_set_dynamic(0);
printf("Enter the value of N = ");
scanf("%d", &n);
int m = omp_get_num_procs();
omp_set_num_threads(m);
#pragma omp parallel for reduction(+:sum)
for(i = 2; i <= n; i++){
sum += factorial(i);
}
printf("Sum = %d\n", sum);
return 0;
} |
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
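/* Usage sketch (illustrative, mirroring the timing loop in main below):
 *
 *   struct timeval start, end, result;
 *   gettimeofday(&start, 0);
 *   // ... region to time ...
 *   gettimeofday(&end, 0);
 *   timeval_subtract(&result, &end, &start);
 *   double seconds = result.tv_sec + result.tv_usec * 1.0e-6;
 */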
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2;
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 16;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
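/*
 * Stencil structure (editor's annotation): each update combines the center
 * point (weight coef0) with neighbors at distances 1..4 along each of the
 * six axis directions (weights coef1..coef4): 1 + 6*4 = 25 points, i.e. a
 * radius-4 star stencil with a second-order (two time level) update.
 */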
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// reference (serial) execution of the stencil kernel
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
coef0* A[t%2][i ][j ][k ] +
coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
sum-gpu.c | #include <stdio.h>
#include <stdlib.h>
int main()
{
int n = 40000000;
//int n = 400000;
double* vector = (double*) malloc(n * sizeof(double));
double sum = 0;
#pragma omp parallel for simd
for(int x = 0; x < n; ++x)
vector[x] = x;
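// Offloaded reduction: map(tofrom: sum) copies sum to the device and back,
// map(vector[0:n]) transfers the array (default map-type tofrom), and
// reduction(+: sum) combines the per-thread partial sums across all teams.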
#pragma omp target teams distribute parallel for simd map(tofrom: sum) map(vector[0:n]) reduction(+: sum)
for(int y = 0; y < n; ++y)
sum += vector[y];
//printf("%lf", sum);
free(vector);
vector = NULL;
return 0;
}
|
deprecate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE %
% D D E P P R R E C A A T E %
% D D EEE PPPPP RRRR EEE C AAAAA T EEE %
% D D E P R R E C A A T E %
% DDDD EEEEE P R R EEEEE CCCC A A T EEEEE %
% %
% %
% MagickCore Deprecated Methods %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
#define WIN32_LEAN_AND_MEAN
#define VC_EXTRALEAN
#include <windows.h>
#endif
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/geometry.h"
#include "magick/identify.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/morphology.h"
#include "magick/paint.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/semaphore-private.h"
#include "magick/segment.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/threshold.h"
#include "magick/thread_.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
#include "magick/utility.h"
#if !defined(MAGICKCORE_EXCLUDE_DEPRECATED)
/*
Global declarations.
*/
static MonitorHandler
monitor_handler = (MonitorHandler) NULL;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e C a c h e V i e w I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireCacheViewIndexes() returns the indexes associated with the specified
% view.
%
% Deprecated, replace with:
%
% GetCacheViewVirtualIndexQueue(cache_view);
%
% The format of the AcquireCacheViewIndexes method is:
%
% const IndexPacket *AcquireCacheViewIndexes(const CacheView *cache_view)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
*/
MagickExport const IndexPacket *AcquireCacheViewIndexes(
const CacheView *cache_view)
{
return(GetCacheViewVirtualIndexQueue(cache_view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e C a c h e V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireCacheViewPixels() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% Deprecated, replace with:
%
% GetCacheViewVirtualPixels(cache_view,x,y,columns,rows,exception);
%
% The format of the AcquireCacheViewPixels method is:
%
% const PixelPacket *AcquireCacheViewPixels(const CacheView *cache_view,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *AcquireCacheViewPixels(
const CacheView *cache_view,const ssize_t x,const ssize_t y,
const size_t columns,const size_t rows,ExceptionInfo *exception)
{
return(GetCacheViewVirtualPixels(cache_view,x,y,columns,rows,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImagePixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in RAM, or in a memory-mapped file. The
% returned pointer should *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to access
% the black color component or to obtain the colormap indexes (of type
% IndexPacket) corresponding to the region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the AcquireImagePixels() and GetAuthenticPixels() methods are not
% thread-safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% Deprecated, replace with:
%
% GetVirtualPixels(image,x,y,columns,rows,exception);
%
% The format of the AcquireImagePixels() method is:
%
% const PixelPacket *AcquireImagePixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *AcquireImagePixels(const Image *image,
const ssize_t x,const ssize_t y,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
return(GetVirtualPixels(image,x,y,columns,rows,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireIndexes() returns the black channel or the colormap indexes
% associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% Deprecated, replace with:
%
% GetVirtualIndexQueue(image);
%
% The format of the AcquireIndexes() method is:
%
% const IndexPacket *AcquireIndexes(const Image *image)
%
% A description of each parameter follows:
%
% o indexes: AcquireIndexes() returns the indexes associated with the last
% call to QueueAuthenticPixels() or GetVirtualPixels().
%
% o image: the image.
%
*/
MagickExport const IndexPacket *AcquireIndexes(const Image *image)
{
return(GetVirtualIndexQueue(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMemory() returns a pointer to a block of memory at least size bytes
% suitably aligned for any use.
%
% The format of the AcquireMemory method is:
%
% void *AcquireMemory(const size_t size)
%
% A description of each parameter follows:
%
% o size: the size of the memory in bytes to allocate.
%
*/
MagickExport void *AcquireMemory(const size_t size)
{
void
*allocation;
assert(size != 0);
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
allocation=malloc(size);
return(allocation);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e C a c h e V i e w P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOneCacheViewPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneCacheViewAuthenticPixel() instead.
%
% Deprecated, replace with:
%
% GetOneCacheViewVirtualPixel(cache_view,x,y,pixel,exception);
%
% The format of the AcquireOneCacheViewPixel method is:
%
% MagickBooleanType AcquireOneCacheViewPixel(const CacheView *cache_view,
% const ssize_t x,const ssize_t y,PixelPacket *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o x,y: These values define the offset of the pixel.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AcquireOneCacheViewPixel(
const CacheView *cache_view,const ssize_t x,const ssize_t y,
PixelPacket *pixel,ExceptionInfo *exception)
{
return(GetOneCacheViewVirtualPixel(cache_view,x,y,pixel,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e C a c h e V i e w V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOneCacheViewVirtualPixel() returns a single pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneCacheViewAuthenticPixel() instead.
%
% Deprecated, replace with:
%
% GetOneCacheViewVirtualMethodPixel(cache_view,virtual_pixel_method,
% x,y,pixel,exception);
%
% The format of the AcquireOneCacheViewPixel method is:
%
% MagickBooleanType AcquireOneCacheViewVirtualPixel(
% const CacheView *cache_view,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the offset of the pixel.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AcquireOneCacheViewVirtualPixel(
const CacheView *cache_view,const VirtualPixelMethod virtual_pixel_method,
const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=GetOneCacheViewVirtualMethodPixel(cache_view,virtual_pixel_method,
x,y,pixel,exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e M a g i c k P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOneMagickPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOnePixel() instead.
%
% Deprecated, replace with:
%
% MagickPixelPacket pixel;
% GetOneVirtualMagickPixel(image,x,y,&pixel,exception);
%
% The format of the AcquireOneMagickPixel() method is:
%
%      MagickPixelPacket AcquireOneMagickPixel(const Image *image,const ssize_t x,
%        const ssize_t y,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickPixelPacket AcquireOneMagickPixel(const Image *image,
const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
MagickPixelPacket
pixel;
(void) GetOneVirtualMagickPixel(image,x,y,&pixel,exception);
return(pixel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOnePixel() returns a single pixel at the specified (x,y) location.
% The image background color is returned if an error occurs. If you plan to
% modify the pixel, use GetOnePixel() instead.
%
% Deprecated, replace with:
%
% PixelPacket pixel;
% GetOneVirtualPixel(image,x,y,&pixel,exception);
%
% The format of the AcquireOnePixel() method is:
%
%      PixelPacket AcquireOnePixel(const Image *image,const ssize_t x,
%        const ssize_t y,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket AcquireOnePixel(const Image *image,const ssize_t x,
const ssize_t y,ExceptionInfo *exception)
{
PixelPacket
pixel;
(void) GetOneVirtualPixel(image,x,y,&pixel,exception);
return(pixel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireOneVirtualPixel() returns a single pixel at the specified (x,y)
% location as defined by specified pixel method. The image background color
% is returned if an error occurs. If you plan to modify the pixel, use
% GetOnePixel() instead.
%
% Deprecated, replace with:
%
% PixelPacket pixel;
% GetOneVirtualMethodPixel(image,virtual_pixel_method,x,y,&pixel,exception);
%
% The format of the AcquireOneVirtualPixel() method is:
%
%      PixelPacket AcquireOneVirtualPixel(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o virtual_pixel_method: the virtual pixel method.
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket AcquireOneVirtualPixel(const Image *image,
const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
ExceptionInfo *exception)
{
PixelPacket
pixel;
(void) GetOneVirtualMethodPixel(image,virtual_pixel_method,x,y,&pixel,
exception);
return(pixel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixels() returns the pixels associated with the last call to
% QueueAuthenticPixels() or GetVirtualPixels().
%
% Deprecated, replace with:
%
% GetVirtualPixelQueue(image);
%
% The format of the AcquirePixels() method is:
%
%      const PixelPacket *AcquirePixels(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const PixelPacket *AcquirePixels(const Image *image)
{
return(GetVirtualPixelQueue(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e S e m a p h o r e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireSemaphoreInfo() acquires a semaphore.
%
% The format of the AcquireSemaphoreInfo method is:
%
% void AcquireSemaphoreInfo(SemaphoreInfo **semaphore_info)
%
% A description of each parameter follows:
%
%    o semaphore_info: Specifies a pointer to a SemaphoreInfo structure.
%
*/
MagickExport void AcquireSemaphoreInfo(SemaphoreInfo **semaphore_info)
{
assert(semaphore_info != (SemaphoreInfo **) NULL);
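  /*
    Double-checked locking: re-test the pointer under the mutex so that only
    one thread allocates the semaphore.
  */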
if (*semaphore_info == (SemaphoreInfo *) NULL)
{
InitializeMagickMutex();
LockMagickMutex();
if (*semaphore_info == (SemaphoreInfo *) NULL)
*semaphore_info=AllocateSemaphoreInfo();
UnlockMagickMutex();
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffinityImage() replaces the colors of an image with the closest color from
% a reference image.
%
% Deprecated, replace with:
%
% RemapImage(quantize_info,image,affinity_image);
%
% The format of the AffinityImage method is:
%
% MagickBooleanType AffinityImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *affinity_image)
%
% A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
% o image: the image.
%
% o affinity_image: the reference image.
%
*/
MagickExport MagickBooleanType AffinityImage(const QuantizeInfo *quantize_info,
Image *image,const Image *affinity_image)
{
return(RemapImage(quantize_info,image,affinity_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n i t y I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffinityImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% Deprecated, replace with:
%
% RemapImages(quantize_info,images,affinity_image);
%
% The format of the AffinityImage method is:
%
% MagickBooleanType AffinityImages(const QuantizeInfo *quantize_info,
% Image *images,Image *affinity_image)
%
% A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
% o images: the image sequence.
%
% o affinity_image: the reference image.
%
*/
MagickExport MagickBooleanType AffinityImages(const QuantizeInfo *quantize_info,
Image *images,const Image *affinity_image)
{
return(RemapImages(quantize_info,images,affinity_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A l l o c a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AllocateImage() returns a pointer to an image structure initialized to
% default values.
%
% Deprecated, replace with:
%
% AcquireImage(image_info);
%
% The format of the AllocateImage method is:
%
% Image *AllocateImage(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
*/
MagickExport Image *AllocateImage(const ImageInfo *image_info)
{
return(AcquireImage(image_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A l l o c a t e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AllocateImageColormap() allocates an image colormap and initializes
% it to a linear gray colorspace. If the image already has a colormap,
% it is replaced. AllocateImageColormap() returns MagickTrue if successful,
% otherwise MagickFalse if there is not enough memory.
%
% Deprecated, replace with:
%
% AcquireImageColormap(image,colors);
%
% The format of the AllocateImageColormap method is:
%
% MagickBooleanType AllocateImageColormap(Image *image,
% const size_t colors)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colors: the number of colors in the image colormap.
%
*/
MagickExport MagickBooleanType AllocateImageColormap(Image *image,
const size_t colors)
{
return(AcquireImageColormap(image,colors));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A l l o c a t e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AllocateNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% Deprecated, replace with:
%
% AcquireNextImage(image_info,image);
%
% The format of the AllocateNextImage method is:
%
% void AllocateNextImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
*/
MagickExport void AllocateNextImage(const ImageInfo *image_info,Image *image)
{
AcquireNextImage(image_info,image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A l l o c a t e S t r i n g %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AllocateString() allocates memory for a string and copies the source string
% to that memory location (and returns it).
%
% The format of the AllocateString method is:
%
% char *AllocateString(const char *source)
%
% A description of each parameter follows:
%
% o source: A character string.
%
*/
MagickExport char *AllocateString(const char *source)
{
char
*destination;
size_t
length;
assert(source != (const char *) NULL);
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
length=strlen(source)+MaxTextExtent+1;
destination=(char *) AcquireQuantumMemory(length,sizeof(*destination));
if (destination == (char *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
*destination='\0';
(void) CopyMagickString(destination,source,length);
return(destination);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A v e r a g e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AverageImages() takes a set of images and averages them together. Each
% image in the set must have the same width and height. AverageImages()
% returns a single image with each corresponding pixel component of each
% image averaged. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% Deprecated, replace with:
%
% EvaluateImages(images,MeanEvaluateOperator,exception);
%
% The format of the AverageImages method is:
%
% Image *AverageImages(Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AverageImages(const Image *images,ExceptionInfo *exception)
{
return(EvaluateImages(images,MeanEvaluateOperator,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a n n e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Extract a channel from the image. A channel is a particular color component
% of each pixel in the image.
%
% Deprecated, replace with:
%
% SeparateImageChannel(image,channel);
%
% The format of the ChannelImage method is:
%
% unsigned int ChannelImage(Image *image,const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: Identify which channel to extract: RedChannel, GreenChannel,
% BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, YellowChannel,
% or BlackChannel.
%
*/
MagickExport unsigned int ChannelImage(Image *image,const ChannelType channel)
{
return(SeparateImageChannel(image,channel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a n n e l T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChannelThresholdImage() changes the value of individual pixels based on
% the intensity of each pixel channel. The result is a high-contrast image.
%
% The format of the ChannelThresholdImage method is:
%
% unsigned int ChannelThresholdImage(Image *image,const char *level)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o level: define the threshold values.
%
*/
MagickExport unsigned int ChannelThresholdImage(Image *image,const char *level)
{
MagickPixelPacket
threshold;
GeometryInfo
geometry_info;
unsigned int
flags,
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
if (level == (char *) NULL)
return(MagickFalse);
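  /*
    The red/green/blue thresholds come from the rho/sigma/xi fields of the
    parsed geometry; green and blue default to the red threshold when the
    corresponding value is absent.
  */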
flags=ParseGeometry(level,&geometry_info);
threshold.red=geometry_info.rho;
threshold.green=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
threshold.green=threshold.red;
threshold.blue=geometry_info.xi;
if ((flags & XiValue) == 0)
threshold.blue=threshold.red;
status=BilevelImageChannel(image,RedChannel,threshold.red);
status&=BilevelImageChannel(image,GreenChannel,threshold.green);
status&=BilevelImageChannel(image,BlueChannel,threshold.blue);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPathImage() sets the image clip mask based on any clipping path
% information if it exists.
%
% Deprecated, replace with:
%
% ClipImagePath(image,pathname,inside);
%
% The format of the ClipPathImage method is:
%
% MagickBooleanType ClipPathImage(Image *image,const char *pathname,
% const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
MagickExport MagickBooleanType ClipPathImage(Image *image,const char *pathname,
const MagickBooleanType inside)
{
return(ClipImagePath(image,pathname,inside));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e A t t r i b u t e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageAttributes() clones one or more image attributes.
%
% Deprecated, replace with:
%
% CloneImageProperties(image,clone_image);
%
% The format of the CloneImageAttributes method is:
%
% MagickBooleanType CloneImageAttributes(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageAttributes(Image *image,
const Image *clone_image)
{
return(CloneImageProperties(image,clone_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneMemory() copies size bytes from memory area source to the destination.
% Copying between objects that overlap will take place correctly. It returns
% destination.
%
% The format of the CloneMemory method is:
%
% void *CloneMemory(void *destination,const void *source,
% const size_t size)
%
% A description of each parameter follows:
%
% o destination: the destination.
%
% o source: the source.
%
% o size: the size of the memory in bytes to allocate.
%
*/
MagickExport void *CloneMemory(void *destination,const void *source,
const size_t size)
{
register const unsigned char
*p;
register unsigned char
*q;
register ssize_t
i;
assert(destination != (void *) NULL);
assert(source != (const void *) NULL);
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
p=(const unsigned char *) source;
q=(unsigned char *) destination;
  /*
    Forward copy is safe when the destination precedes the source or the
    buffers do not overlap.
  */
  if ((q <= p) || (q >= (p+size)))
    return(memcpy(destination,source,size));
/*
Overlap, copy backwards.
*/
p+=size;
q+=size;
for (i=(ssize_t) (size-1); i >= 0; i--)
*--q=(*--p);
return(destination);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o s e C a c h e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloseCacheView() closes the specified view returned by a previous call to
% OpenCacheView().
%
% Deprecated, replace with:
%
% DestroyCacheView(view_info);
%
% The format of the CloseCacheView method is:
%
% CacheView *CloseCacheView(CacheView *view_info)
%
% A description of each parameter follows:
%
% o view_info: the address of a structure of type CacheView.
%
*/
MagickExport CacheView *CloseCacheView(CacheView *view_info)
{
return(DestroyCacheView(view_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorFloodfillImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly.
% However, in many cases two colors may differ by a small amount. The
% fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, with fuzz set to 10, the
% color red at intensities of 100 and 102 is interpreted as the same color
% for the purposes of the floodfill.
%
% The format of the ColorFloodfillImage method is:
%
% MagickBooleanType ColorFloodfillImage(Image *image,
% const DrawInfo *draw_info,const PixelPacket target,
% const ssize_t x_offset,const ssize_t y_offset,const PaintMethod method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o target: the RGB value of the target color.
%
% o x,y: the starting location of the operation.
%
% o method: Choose either FloodfillMethod or FillToBorderMethod.
%
*/
#define MaxStacksize (1UL << 15)
#define PushSegmentStack(up,left,right,delta) \
{ \
if (s >= (segment_stack+MaxStacksize)) \
ThrowBinaryImageException(DrawError,"SegmentStackOverflow",image->filename) \
else \
{ \
if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
{ \
s->x1=(double) (left); \
s->y1=(double) (up); \
s->x2=(double) (right); \
s->y2=(double) (delta); \
s++; \
} \
} \
}
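/*
  Editor's annotation: this is a scanline flood fill with an explicit segment
  stack. Spans of matching pixels are marked transparent in a cloned
  "floodplane" image, adjacent rows are pushed for later processing (avoiding
  deep recursion), and a second pass composites the fill color wherever the
  floodplane was marked.
*/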
MagickExport MagickBooleanType ColorFloodfillImage(Image *image,
const DrawInfo *draw_info,const PixelPacket target,const ssize_t x_offset,
const ssize_t y_offset,const PaintMethod method)
{
Image
*floodplane_image;
MagickBooleanType
skip;
PixelPacket
fill_color;
register SegmentInfo
*s;
SegmentInfo
*segment_stack;
ssize_t
offset,
start,
x,
x1,
x2,
y;
/*
Check boundary conditions.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
return(MagickFalse);
if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
if (floodplane_image == (Image *) NULL)
return(MagickFalse);
(void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
/*
Set floodfill color.
*/
segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize,
sizeof(*segment_stack));
if (segment_stack == (SegmentInfo *) NULL)
{
floodplane_image=DestroyImage(floodplane_image);
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/*
Push initial segment on stack.
*/
x=x_offset;
y=y_offset;
start=0;
s=segment_stack;
PushSegmentStack(y,x,x,1);
PushSegmentStack(y+1,x,x,-1);
while (s > segment_stack)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
/*
Pop segment off stack.
*/
s--;
x1=(ssize_t) s->x1;
x2=(ssize_t) s->x2;
offset=(ssize_t) s->y2;
y=(ssize_t) s->y1+offset;
/*
Recolor neighboring pixels.
*/
p=GetVirtualPixels(image,0,y,(size_t) (x1+1),1,&image->exception);
q=GetAuthenticPixels(floodplane_image,0,y,(size_t) (x1+1),1,
&image->exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
p+=x1;
q+=x1;
for (x=x1; x >= 0; x--)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
if (method == FloodfillMethod)
{
if (IsColorSimilar(image,p,&target) == MagickFalse)
break;
}
else
if (IsColorSimilar(image,p,&target) != MagickFalse)
break;
q->opacity=(Quantum) TransparentOpacity;
p--;
q--;
}
if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
break;
skip=x >= x1 ? MagickTrue : MagickFalse;
if (skip == MagickFalse)
{
start=x+1;
if (start < x1)
PushSegmentStack(y,start,x1-1,-offset);
x=x1+1;
}
do
{
if (skip == MagickFalse)
{
if (x < (ssize_t) image->columns)
{
p=GetVirtualPixels(image,x,y,image->columns-x,1,
&image->exception);
q=GetAuthenticPixels(floodplane_image,x,y,image->columns-x,1,
&image->exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for ( ; x < (ssize_t) image->columns; x++)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
if (method == FloodfillMethod)
{
if (IsColorSimilar(image,p,&target) == MagickFalse)
break;
}
else
if (IsColorSimilar(image,p,&target) != MagickFalse)
break;
q->opacity=(Quantum) TransparentOpacity;
p++;
q++;
}
if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
break;
}
PushSegmentStack(y,start,x-1,offset);
if (x > (x2+1))
PushSegmentStack(y,x2+1,x-1,-offset);
}
skip=MagickFalse;
x++;
if (x <= x2)
{
p=GetVirtualPixels(image,x,y,(size_t) (x2-x+1),1,
&image->exception);
q=GetAuthenticPixels(floodplane_image,x,y,(size_t) (x2-x+1),1,
&image->exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for ( ; x <= x2; x++)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
if (method == FloodfillMethod)
{
if (IsColorSimilar(image,p,&target) != MagickFalse)
break;
}
else
if (IsColorSimilar(image,p,&target) == MagickFalse)
break;
p++;
q++;
}
}
start=x;
} while (x <= x2);
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
/*
Tile fill color onto floodplane.
*/
p=GetVirtualPixels(floodplane_image,0,y,image->columns,1,
&image->exception);
q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelOpacity(p) != OpaqueOpacity)
{
(void) GetFillColor(draw_info,x,y,&fill_color);
MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,q,
(MagickRealType) q->opacity,q);
}
p++;
q++;
}
if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
break;
}
segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack);
floodplane_image=DestroyImage(floodplane_image);
return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s t i t u t e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConstituteComponentGenesis() instantiates the constitute component.
%
% The format of the ConstituteComponentGenesis method is:
%
% MagickBooleanType ConstituteComponentGenesis(void)
%
*/
MagickExport MagickBooleanType ConstituteComponentGenesis(void)
{
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s t i t u t e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConstituteComponentTerminus() destroys the constitute component.
%
% The format of the ConstituteComponentTerminus method is:
%
% ConstituteComponentTerminus(void)
%
*/
MagickExport void ConstituteComponentTerminus(void)
{
}
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o H B i t m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToHBITMAP() extracts a specified region of the image and returns
% it as a Windows HBITMAP. While the same functionality can be accomplished by
% invoking CropImage() followed by ImageToHBITMAP(), this method is more
% efficient since it copies pixels directly to the HBITMAP.
%
% The format of the CropImageToHBITMAP method is:
%
% HBITMAP CropImageToHBITMAP(Image* image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *CropImageToHBITMAP(Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define CropImageTag "Crop/Image"
BITMAP
bitmap;
HBITMAP
bitmapH;
HANDLE
bitmap_bitsH;
MagickBooleanType
proceed;
RectangleInfo
page;
register const PixelPacket
*p;
register RGBQUAD
*q;
RGBQUAD
*bitmap_bits;
ssize_t
y;
/*
Check crop geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (((geometry->x+(ssize_t) geometry->width) < 0) ||
((geometry->y+(ssize_t) geometry->height) < 0) ||
(geometry->x >= (ssize_t) image->columns) ||
(geometry->y >= (ssize_t) image->rows))
ThrowImageException(OptionError,"GeometryDoesNotContainImage");
page=(*geometry);
if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
page.width=image->columns-page.x;
if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
page.height=image->rows-page.y;
if (page.x < 0)
{
page.width+=page.x;
page.x=0;
}
if (page.y < 0)
{
page.height+=page.y;
page.y=0;
}
if ((page.width == 0) || (page.height == 0))
ThrowImageException(OptionError,"GeometryDimensionsAreZero");
/*
Initialize crop image attributes.
*/
bitmap.bmType = 0;
bitmap.bmWidth = (LONG) page.width;
bitmap.bmHeight = (LONG) page.height;
bitmap.bmWidthBytes = bitmap.bmWidth * 4;
bitmap.bmPlanes = 1;
bitmap.bmBitsPixel = 32;
bitmap.bmBits = NULL;
bitmap_bitsH=(HANDLE) GlobalAlloc(GMEM_MOVEABLE | GMEM_DDESHARE,page.width*
page.height*bitmap.bmBitsPixel);
if (bitmap_bitsH == NULL)
return(NULL);
bitmap_bits=(RGBQUAD *) GlobalLock((HGLOBAL) bitmap_bitsH);
if ( bitmap.bmBits == NULL )
bitmap.bmBits = bitmap_bits;
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
SetImageColorspace(image,sRGBColorspace);
/*
Extract crop image.
*/
q=bitmap_bits;
for (y=0; y < (ssize_t) page.height; y++)
{
register ssize_t
x;
p=GetVirtualPixels(image,page.x,page.y+y,page.width,1,exception);
if (p == (const PixelPacket *) NULL)
break;
/* Transfer pixels, scaling to Quantum */
    for (x=(ssize_t) page.width; x > 0; x--)
{
q->rgbRed = ScaleQuantumToChar(GetPixelRed(p));
q->rgbGreen = ScaleQuantumToChar(GetPixelGreen(p));
q->rgbBlue = ScaleQuantumToChar(GetPixelBlue(p));
q->rgbReserved = 0;
p++;
q++;
}
proceed=SetImageProgress(image,CropImageTag,y,page.height);
if (proceed == MagickFalse)
break;
}
if (y < (ssize_t) page.height)
{
GlobalUnlock((HGLOBAL) bitmap_bitsH);
GlobalFree((HGLOBAL) bitmap_bitsH);
return((void *) NULL);
}
bitmap.bmBits=bitmap_bits;
bitmapH=CreateBitmapIndirect(&bitmap);
GlobalUnlock((HGLOBAL) bitmap_bitsH);
GlobalFree((HGLOBAL) bitmap_bitsH);
return((void *) bitmapH);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageAttribute() deletes an attribute from the image.
%
% Deprecated, replace with:
%
% DeleteImageProperty(image,key);
%
% The format of the DeleteImageAttribute method is:
%
% MagickBooleanType DeleteImageAttribute(Image *image,const char *key)
%
% A description of each parameter follows:
%
% o image: the image info.
%
% o key: the image key.
%
*/
MagickExport MagickBooleanType DeleteImageAttribute(Image *image,
const char *key)
{
return(DeleteImageProperty(image,key));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageList() deletes an image at the specified position in the list.
%
% The format of the DeleteImageList method is:
%
% unsigned int DeleteImageList(Image *images,const ssize_t offset)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o offset: the position within the list.
%
*/
MagickExport unsigned int DeleteImageList(Image *images,const ssize_t offset)
{
register ssize_t
i;
if (images->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
while (GetPreviousImageInList(images) != (Image *) NULL)
images=GetPreviousImageInList(images);
for (i=0; i < offset; i++)
{
if (GetNextImageInList(images) == (Image *) NULL)
return(MagickFalse);
images=GetNextImageInList(images);
}
DeleteImageFromList(&images);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteMagickRegistry() deletes an entry in the registry as defined by the id.
% It returns MagickTrue if the entry is deleted otherwise MagickFalse if no
% entry is found in the registry that matches the id.
%
% Deprecated, replace with:
%
% char key[MaxTextExtent];
% FormatLocaleString(key,MaxTextExtent,"%ld\n",id);
% DeleteImageRegistry(key);
%
% The format of the DeleteMagickRegistry method is:
%
% MagickBooleanType DeleteMagickRegistry(const ssize_t id)
%
% A description of each parameter follows:
%
% o id: the registry id.
%
*/
MagickExport MagickBooleanType DeleteMagickRegistry(const ssize_t id)
{
char
key[MaxTextExtent];
(void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id);
return(DeleteImageRegistry(key));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C o n s t i t u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyConstitute() destroys the constitute component.
%
% The format of the DestroyConstitute method is:
%
% DestroyConstitute(void)
%
*/
MagickExport void DestroyConstitute(void)
{
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMagickRegistry() deallocates memory associated with the magick
% registry.
%
% Deprecated, replace with:
%
% RegistryComponentTerminus();
%
% The format of the DestroyMagickRegistry method is:
%
% void DestroyMagickRegistry(void)
%
*/
MagickExport void DestroyMagickRegistry(void)
{
RegistryComponentTerminus();
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s c r i b e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DescribeImage() describes an image by printing its attributes to the file.
% Attributes include the image width, height, size, and others.
%
% Deprecated, replace with:
%
% IdentifyImage(image,file,verbose);
%
% The format of the DescribeImage method is:
%
% MagickBooleanType DescribeImage(Image *image,FILE *file,
% const MagickBooleanType verbose)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o file: the file, typically stdout.
%
% o verbose: A value other than zero prints more detailed information
% about the image.
%
*/
MagickExport MagickBooleanType DescribeImage(Image *image,FILE *file,
const MagickBooleanType verbose)
{
return(IdentifyImage(image,file,verbose));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e A t t r i b u t e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageAttributes() deallocates memory associated with the image
% attribute list.
%
% The format of the DestroyImageAttributes method is:
%
% DestroyImageAttributes(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImageAttributes(Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->attributes != (void *) NULL)
image->attributes=(void *) DestroySplayTree((SplayTreeInfo *)
image->attributes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImages() destroys an image list.
%
% Deprecated, replace with:
%
% DestroyImageList(image);
%
% The format of the DestroyImages method is:
%
% void DestroyImages(Image *image)
%
% A description of each parameter follows:
%
% o image: the image sequence.
%
*/
MagickExport void DestroyImages(Image *image)
{
if (image == (Image *) NULL)
return;
if (image->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.3");
image=DestroyImageList(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a g i c k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMagick() destroys the ImageMagick environment.
%
% Deprecated, replace with:
%
% MagickCoreTerminus();
%
% The format of the DestroyMagick function is:
%
% DestroyMagick(void)
%
*/
MagickExport void DestroyMagick(void)
{
MagickCoreTerminus();
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s p a t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DispatchImage() extracts pixel data from an image and returns it to you.
% The method returns MagickTrue on success, otherwise MagickFalse if an error
% is encountered. The data is returned as char, short int, int, ssize_t, float,
% or double in the order specified by map.
%
% Suppose you want to extract the first scanline of a 640x480 image as
% character data in red-green-blue order:
%
% DispatchImage(image,0,0,640,1,"RGB",CharPixel,pixels,exception);
%
% Deprecated, replace with:
%
% ExportImagePixels(image,x_offset,y_offset,columns,rows,map,type,pixels,
% exception);
%
% The format of the DispatchImage method is:
%
% unsigned int DispatchImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,const size_t columns,
% const size_t rows,const char *map,const StorageType type,
% void *pixels,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset, y_offset, columns, rows: These values define the perimeter
% of a region of pixels you want to extract.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha, C = cyan, Y = yellow, M = magenta, K = black, or
% I = intensity (for grayscale).
%
% o type: Define the data type of the pixels. Float and double types are
% normalized to [0..1] otherwise [0..QuantumRange]. Choose from these
% types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel, or
% DoublePixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int DispatchImage(const Image *image,const ssize_t x_offset,
const ssize_t y_offset,const size_t columns,const size_t rows,
const char *map,const StorageType type,void *pixels,ExceptionInfo *exception)
{
unsigned int
status;
if (image->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6");
status=ExportImagePixels(image,x_offset,y_offset,columns,rows,map,type,pixels,
exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t r a c t S u b i m a g e F r o m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtractSubimageFromImage() extracts a region of the image that most
% closely resembles the reference.
%
% The format of the ExtractSubimageFromImage method is:
%
% Image *ExtractSubimageFromImage(const Image *image,
% const Image *reference,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
const ssize_t x_offset,const ssize_t y_offset,
const double similarity_threshold,ExceptionInfo *exception)
{
CacheView
*image_view,
*reference_view;
double
channels,
normalized_similarity,
similarity;
ssize_t
y;
/*
Compute the similarity in pixels between two images.
*/
normalized_similarity=1.0;
similarity=0.0;
channels=3;
if ((image->matte != MagickFalse) && (reference->matte != MagickFalse))
channels++;
if ((image->colorspace == CMYKColorspace) &&
(reference->colorspace == CMYKColorspace))
channels++;
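  /*
    Accumulate QuantumScale-normalized squared channel differences; the
    running value sqrt(sum)/(columns*rows*channels) is compared against the
    caller's threshold after each row so the scan can exit early once the
    candidate region is already worse than the best match so far.
  */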
image_view=AcquireVirtualCacheView(image,exception);
reference_view=AcquireVirtualCacheView(reference,exception);
for (y=0; y < (ssize_t) reference->rows; y++)
{
register const IndexPacket
*indexes,
*reference_indexes;
register const PixelPacket
*p,
*q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset+y,
reference->columns,1,exception);
q=GetCacheViewVirtualPixels(reference_view,0,y,reference->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
continue;
indexes=GetCacheViewVirtualIndexQueue(image_view);
reference_indexes=GetCacheViewVirtualIndexQueue(reference_view);
for (x=0; x < (ssize_t) reference->columns; x++)
{
MagickRealType
pixel;
pixel=QuantumScale*(GetPixelRed(p)-(double)
GetPixelRed(q));
similarity+=pixel*pixel;
pixel=QuantumScale*(GetPixelGreen(p)-(double)
GetPixelGreen(q));
similarity+=pixel*pixel;
pixel=QuantumScale*(GetPixelBlue(p)-(double)
GetPixelBlue(q));
similarity+=pixel*pixel;
if ((image->matte != MagickFalse) && (reference->matte != MagickFalse))
{
pixel=QuantumScale*(GetPixelOpacity(p)-(double)
GetPixelOpacity(q));
similarity+=pixel*pixel;
}
if ((image->colorspace == CMYKColorspace) &&
(reference->colorspace == CMYKColorspace))
{
pixel=QuantumScale*(GetPixelIndex(indexes+x)-(double)
GetPixelIndex(reference_indexes+x));
similarity+=pixel*pixel;
}
p++;
q++;
}
normalized_similarity=sqrt(similarity)/reference->columns/reference->rows/
channels;
if (normalized_similarity > similarity_threshold)
break;
}
reference_view=DestroyCacheView(reference_view);
image_view=DestroyCacheView(image_view);
return(normalized_similarity);
}
MagickExport Image *ExtractSubimageFromImage(Image *image,
const Image *reference,ExceptionInfo *exception)
{
double
similarity_threshold;
RectangleInfo
offset;
ssize_t
y;
/*
Extract reference from image.
*/
if ((reference->columns > image->columns) || (reference->rows > image->rows))
return((Image *) NULL);
similarity_threshold=(double) image->columns*image->rows;
SetGeometry(reference,&offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (y=0; y < (ssize_t) (image->rows-reference->rows); y++)
{
double
similarity;
register ssize_t
x;
for (x=0; x < (ssize_t) (image->columns-reference->columns); x++)
{
similarity=GetSimilarityMetric(image,reference,x,y,similarity_threshold,
exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ExtractSubimageFromImage)
#endif
if (similarity < similarity_threshold)
{
similarity_threshold=similarity;
offset.x=x;
offset.y=y;
}
}
}
if (similarity_threshold > (QuantumScale*reference->fuzz/100.0))
return((Image *) NULL);
return(CropImage(image,&offset,exception));
}
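/*
  A minimal usage sketch; the variables haystack, needle, and exception are
  hypothetical:

    Image
      *match;

    match=ExtractSubimageFromImage(haystack,needle,exception);
    if (match != (Image *) NULL)
      match=DestroyImage(match);
*/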
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l a t t e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlattenImages() is an obsolete function: use MergeImageLayers() instead.
%
% Deprecated, replace with:
%
% MergeImageLayers(image,FlattenLayer,exception);
%
% The format of the FlattenImages method is:
%
% Image *FlattenImages(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlattenImages(Image *image,ExceptionInfo *exception)
{
return(MergeImageLayers(image,FlattenLayer,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r m a t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FormatImageAttribute() permits formatted key/value pairs to be saved as an
% image attribute.
%
% The format of the FormatImageAttribute method is:
%
% MagickBooleanType FormatImageAttribute(Image *image,const char *key,
% const char *format,...)
%
% A description of each parameter follows.
%
% o image: The image.
%
% o key: The attribute key.
%
% o format: A string describing the format to use to write the remaining
% arguments.
%
*/
MagickExport MagickBooleanType FormatImageAttributeList(Image *image,
const char *key,const char *format,va_list operands)
{
char
value[MaxTextExtent];
int
n;
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
n=vsnprintf(value,MaxTextExtent,format,operands);
#else
n=vsprintf(value,format,operands);
#endif
if (n < 0)
value[MaxTextExtent-1]='\0';
return(SetImageProperty(image,key,value));
}
MagickExport MagickBooleanType FormatImagePropertyList(Image *image,
const char *property,const char *format,va_list operands)
{
char
value[MaxTextExtent];
int
n;
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
n=vsnprintf(value,MaxTextExtent,format,operands);
#else
n=vsprintf(value,format,operands);
#endif
if (n < 0)
value[MaxTextExtent-1]='\0';
return(SetImageProperty(image,property,value));
}
MagickExport MagickBooleanType FormatImageAttribute(Image *image,
const char *key,const char *format,...)
{
char
value[MaxTextExtent];
int
n;
va_list
operands;
va_start(operands,format);
n=FormatLocaleStringList(value,MaxTextExtent,format,operands);
(void) n;
va_end(operands);
return(SetImageProperty(image,key,value));
}
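/*
  A usage sketch; image, page, and pages are hypothetical:

    (void) FormatImageAttribute(image,"comment","page %d of %d",page,pages);

  The formatted value is stored as the image property "comment".
*/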
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r m a t M a g i c k S t r i n g %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FormatMagickString() prints formatted output of a variable argument list.
%
% The format of the FormatMagickString method is:
%
% ssize_t FormatMagickString(char *string,const size_t length,
% const char *format,...)
%
% A description of each parameter follows.
%
% o string: FormatMagickString() returns the formatted string in this
% character buffer.
%
% o length: the maximum length of the string.
%
% o format: A string describing the format to use to write the remaining
% arguments.
%
*/
MagickExport ssize_t FormatMagickStringList(char *string,const size_t length,
const char *format,va_list operands)
{
int
n;
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
n=vsnprintf(string,length,format,operands);
#else
n=vsprintf(string,format,operands);
#endif
if (n < 0)
string[length-1]='\0';
return((ssize_t) n);
}
MagickExport ssize_t FormatMagickString(char *string,const size_t length,
const char *format,...)
{
ssize_t
n;
va_list
operands;
va_start(operands,format);
n=(ssize_t) FormatMagickStringList(string,length,format,operands);
va_end(operands);
return(n);
}
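/*
  A usage sketch; width and height are hypothetical size_t values:

    char
      geometry[MaxTextExtent];

    (void) FormatMagickString(geometry,MaxTextExtent,"%.20gx%.20g",
      (double) width,(double) height);
*/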
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r m a t S t r i n g %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FormatString() prints formatted output of a variable argument list.
%
% The format of the FormatString method is:
%
% void FormatString(char *string,const char *format,...)
%
% A description of each parameter follows.
%
% o string: Method FormatString returns the formatted string in this
% character buffer.
%
% o format: A string describing the format to use to write the remaining
% arguments.
%
*/
MagickExport void FormatStringList(char *string,const char *format,
va_list operands)
{
int
n;
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
n=vsnprintf(string,MaxTextExtent,format,operands);
#else
n=vsprintf(string,format,operands);
#endif
if (n < 0)
string[MaxTextExtent-1]='\0';
}
MagickExport void FormatString(char *string,const char *format,...)
{
va_list
operands;
va_start(operands,format);
(void) FormatLocaleStringList(string,MaxTextExtent,format,operands);
va_end(operands);
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F u z z y C o l o r M a t c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FuzzyColorMatch() returns true (non-zero) if two pixels are identical in
% color or differ by no more than the specified fuzz tolerance.
%
% The format of the FuzzyColorMatch method is:
%
% unsigned int FuzzyColorMatch(const PixelPacket *p,const PixelPacket *q,
% const double fuzz)
%
% A description of each parameter follows:
%
% o p: Pixel p.
%
% o q: Pixel q.
%
% o fuzz: Define how much tolerance is acceptable to consider
% two colors as the same.
%
*/
MagickExport unsigned int FuzzyColorMatch(const PixelPacket *p,
const PixelPacket *q,const double fuzz)
{
MagickPixelPacket
pixel;
register MagickRealType
distance;
if ((fuzz == 0.0) && (GetPixelRed(p) == GetPixelRed(q)) &&
(GetPixelGreen(p) == GetPixelGreen(q)) &&
(GetPixelBlue(p) == GetPixelBlue(q)))
return(MagickTrue);
pixel.red=GetPixelRed(p)-(MagickRealType) GetPixelRed(q);
distance=pixel.red*pixel.red;
if (distance > (fuzz*fuzz))
return(MagickFalse);
pixel.green=GetPixelGreen(p)-(MagickRealType)
GetPixelGreen(q);
distance+=pixel.green*pixel.green;
if (distance > (fuzz*fuzz))
return(MagickFalse);
pixel.blue=GetPixelBlue(p)-(MagickRealType) GetPixelBlue(q);
distance+=pixel.blue*pixel.blue;
if (distance > (fuzz*fuzz))
return(MagickFalse);
return(MagickTrue);
}
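/*
  A usage sketch; p, q, and image are hypothetical:

    if (FuzzyColorMatch(p,q,image->fuzz) != 0)
      {
        ... the two pixels match within the fuzz tolerance ...
      }

  Note the test is cumulative: the squared channel differences are summed
  and compared against fuzz*fuzz after each channel.
*/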
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F u z z y C o l o r C o m p a r e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FuzzyColorCompare() returns MagickTrue if the distance between two colors is
% less than the specified distance in a linear three dimensional color space.
% This method is used by ColorFloodFill() and other algorithms which
% compare two colors.
%
% The format of the FuzzyColorCompare method is:
%
% MagickBooleanType FuzzyColorCompare(const Image *image,const PixelPacket *p,
% const PixelPacket *q)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o p: Pixel p.
%
% o q: Pixel q.
%
*/
MagickExport MagickBooleanType FuzzyColorCompare(const Image *image,
const PixelPacket *p,const PixelPacket *q)
{
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.5");
return(IsColorSimilar(image,p,q));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F u z z y O p a c i t y C o m p a r e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FuzzyOpacityCompare() returns true if the distance between two opacity
% values is less than the specified distance in a linear color space. This
% method is used by MatteFloodFill() and other algorithms which compare
% two opacity values.
%
% Deprecated, replace with:
%
% IsOpacitySimilar(image,p,q);
%
% The format of the FuzzyOpacityCompare method is:
%
% MagickBooleanType FuzzyOpacityCompare(const Image *image,const PixelPacket *p,
% const PixelPacket *q)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o p: Pixel p.
%
% o q: Pixel q.
%
*/
MagickExport MagickBooleanType FuzzyOpacityCompare(const Image *image,
const PixelPacket *p,const PixelPacket *q)
{
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.5");
return(IsOpacitySimilar(image,p,q));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t C o n f i g u r e B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetConfigureBlob() returns the specified configure file as a blob.
%
% The format of the GetConfigureBlob method is:
%
% void *GetConfigureBlob(const char *filename,char *path,size_t *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o filename: the configure file name.
%
% o path: return the full path information of the configure file.
%
% o length: This pointer to a size_t integer sets the initial length of the
% blob. On return, it reflects the actual length of the blob.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetConfigureBlob(const char *filename,char *path,
size_t *length,ExceptionInfo *exception)
{
void
*blob;
assert(filename != (const char *) NULL);
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",filename);
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
assert(path != (char *) NULL);
assert(length != (size_t *) NULL);
assert(exception != (ExceptionInfo *) NULL);
blob=(void *) NULL;
(void) CopyMagickString(path,filename,MaxTextExtent);
#if defined(MAGICKCORE_INSTALLED_SUPPORT)
#if defined(MAGICKCORE_LIBRARY_PATH)
if (blob == (void *) NULL)
{
/*
Search hard coded paths.
*/
(void) FormatLocaleString(path,MaxTextExtent,"%s%s",
MAGICKCORE_LIBRARY_PATH,filename);
if (IsPathAccessible(path) != MagickFalse)
blob=FileToBlob(path,~0UL,length,exception);
}
#endif
#if defined(MAGICKCORE_WINDOWS_SUPPORT) && !(defined(MAGICKCORE_CONFIGURE_PATH) || defined(MAGICKCORE_SHARE_PATH))
if (blob == (void *) NULL)
{
unsigned char
*key_value;
/*
Locate file via registry key.
*/
key_value=NTRegistryKeyLookup("ConfigurePath");
if (key_value != (unsigned char *) NULL)
{
(void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",(char *)
key_value,DirectorySeparator,filename);
if (IsPathAccessible(path) != MagickFalse)
blob=FileToBlob(path,~0UL,length,exception);
}
}
#endif
#else
if (blob == (void *) NULL)
{
char
*home;
home=GetEnvironmentValue("MAGICK_HOME");
if (home != (char *) NULL)
{
/*
Search MAGICK_HOME.
*/
#if !defined(MAGICKCORE_POSIX_SUPPORT)
(void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",home,
DirectorySeparator,filename);
#else
(void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s",home,
MAGICKCORE_LIBRARY_RELATIVE_PATH,filename);
#endif
if (IsPathAccessible(path) != MagickFalse)
blob=FileToBlob(path,~0UL,length,exception);
home=DestroyString(home);
}
home=GetEnvironmentValue("HOME");
if (home == (char *) NULL)
home=GetEnvironmentValue("USERPROFILE");
if (home != (char *) NULL)
{
/*
Search $HOME/.magick.
*/
(void) FormatLocaleString(path,MaxTextExtent,"%s%s.magick%s%s",home,
DirectorySeparator,DirectorySeparator,filename);
if ((IsPathAccessible(path) != MagickFalse) && (blob == (void *) NULL))
blob=FileToBlob(path,~0UL,length,exception);
home=DestroyString(home);
}
}
if ((blob == (void *) NULL) && (*GetClientPath() != '\0'))
{
#if !defined(MAGICKCORE_POSIX_SUPPORT)
(void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",GetClientPath(),
DirectorySeparator,filename);
#else
char
prefix[MaxTextExtent];
/*
Search based on executable directory if directory is known.
*/
(void) CopyMagickString(prefix,GetClientPath(),
MaxTextExtent);
ChopPathComponents(prefix,1);
(void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s",prefix,
MAGICKCORE_LIBRARY_RELATIVE_PATH,filename);
#endif
if (IsPathAccessible(path) != MagickFalse)
blob=FileToBlob(path,~0UL,length,exception);
}
/*
Search current directory.
*/
if ((blob == (void *) NULL) && (IsPathAccessible(path) != MagickFalse))
blob=FileToBlob(path,~0UL,length,exception);
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
/*
Search Windows registry.
*/
if (blob == (void *) NULL)
blob=NTResourceToBlob(filename);
#endif
#endif
if (blob == (void *) NULL)
(void) ThrowMagickException(exception,GetMagickModule(),ConfigureWarning,
"UnableToOpenConfigureFile","`%s'",path);
return(blob);
}
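/*
  A usage sketch; exception is hypothetical, and "colors.xml" stands in for
  any configure file name:

    char
      path[MaxTextExtent];

    size_t
      length;

    void
      *blob;

    blob=GetConfigureBlob("colors.xml",path,&length,exception);
    if (blob != (void *) NULL)
      blob=RelinquishMagickMemory(blob);
*/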
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t C a c h e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCacheView() gets pixels from the in-memory or disk pixel cache as
% defined by the geometry parameters. A pointer to the pixels is returned if
% the pixels are transferred, otherwise a NULL is returned.
%
% Deprecated, replace with:
%
% GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
% GetCacheViewException(cache_view));
%
% The format of the GetCacheView method is:
%
% PixelPacket *GetCacheView(CacheView *cache_view,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o cache_view: the address of a structure of type CacheView.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *GetCacheView(CacheView *cache_view,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows)
{
PixelPacket
*pixels;
pixels=GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
GetCacheViewException(cache_view));
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t C a c h e V i e w I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCacheViewIndexes() returns the indexes associated with the specified
% view.
%
% Deprecated, replace with:
%
% GetCacheViewAuthenticIndexQueue(cache_view);
%
% The format of the GetCacheViewIndexes method is:
%
% IndexPacket *GetCacheViewIndexes(CacheView *cache_view)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
*/
MagickExport IndexPacket *GetCacheViewIndexes(CacheView *cache_view)
{
return(GetCacheViewAuthenticIndexQueue(cache_view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t C a c h e V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCacheViewPixels() gets pixels from the in-memory or disk pixel cache as
% defined by the geometry parameters. A pointer to the pixels is returned if
% the pixels are transferred, otherwise a NULL is returned.
%
% Deprecated, replace with:
%
% GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
% GetCacheViewException(cache_view));
%
% The format of the GetCacheViewPixels method is:
%
% PixelPacket *GetCacheViewPixels(CacheView *cache_view,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *GetCacheViewPixels(CacheView *cache_view,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows)
{
PixelPacket
*pixels;
pixels=GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
GetCacheViewException(cache_view));
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t E x c e p t i o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetExceptionInfo() initializes an exception to default values.
%
% The format of the GetExceptionInfo method is:
%
% GetExceptionInfo(ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o exception: the exception info.
%
*/
MagickExport void GetExceptionInfo(ExceptionInfo *exception)
{
assert(exception != (ExceptionInfo *) NULL);
(void) memset(exception,0,sizeof(*exception));
exception->severity=UndefinedException;
exception->exceptions=(void *) NewLinkedList(0);
exception->semaphore=AllocateSemaphoreInfo();
exception->signature=MagickCoreSignature;
}
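/*
  The classic stack-allocated pattern this function supports (the preferred
  modern idiom is exception=AcquireExceptionInfo()):

    ExceptionInfo
      exception;

    GetExceptionInfo(&exception);
    ...
    (void) DestroyExceptionInfo(&exception);
*/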
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAttribute() searches the list of image attributes and returns
% a pointer to the attribute if it exists otherwise NULL.
%
% The format of the GetImageAttribute method is:
%
% const ImageAttribute *GetImageAttribute(const Image *image,
% const char *key)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o key: These character strings are the name of an image attribute to
% return.
%
*/
static void *DestroyAttribute(void *attribute)
{
register ImageAttribute
*p;
p=(ImageAttribute *) attribute;
if (p->value != (char *) NULL)
p->value=DestroyString(p->value);
return(RelinquishMagickMemory(p));
}
MagickExport const ImageAttribute *GetImageAttribute(const Image *image,
const char *key)
{
const char
*value;
ImageAttribute
*attribute;
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1");
value=GetImageProperty(image,key);
if (value == (const char *) NULL)
return((const ImageAttribute *) NULL);
if (image->attributes == (void *) NULL)
((Image *) image)->attributes=NewSplayTree(CompareSplayTreeString,
RelinquishMagickMemory,DestroyAttribute);
else
{
const ImageAttribute
*attribute;
attribute=(const ImageAttribute *) GetValueFromSplayTree((SplayTreeInfo *)
image->attributes,key);
if (attribute != (const ImageAttribute *) NULL)
return(attribute);
}
attribute=(ImageAttribute *) AcquireMagickMemory(sizeof(*attribute));
if (attribute == (ImageAttribute *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(attribute,0,sizeof(*attribute));
attribute->key=ConstantString(key);
attribute->value=ConstantString(value);
(void) AddValueToSplayTree((SplayTreeInfo *) ((Image *) image)->attributes,
attribute->key,attribute);
return((const ImageAttribute *) attribute);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C l i p p i n g P a t h A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageClippingPathAttribute() searches the list of image attributes and
% returns a pointer to a clipping path if it exists otherwise NULL.
%
% Deprecated, replace with:
%
% GetImageAttribute(image,"8BIM:1999,2998");
%
% The format of the GetImageClippingPathAttribute method is:
%
% const ImageAttribute *GetImageClippingPathAttribute(Image *image)
%
% A description of each parameter follows:
%
% o attribute: Method GetImageClippingPathAttribute returns the clipping
% path if it exists otherwise NULL.
%
% o image: the image.
%
*/
MagickExport const ImageAttribute *GetImageClippingPathAttribute(Image *image)
{
return(GetImageAttribute(image,"8BIM:1999,2998"));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e F r o m M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageFromMagickRegistry() gets an image from the registry as defined by
% its name. If the image is not found, a NULL image is returned.
%
% Deprecated, replace with:
%
% GetImageRegistry(ImageRegistryType,name,exception);
%
% The format of the GetImageFromMagickRegistry method is:
%
% Image *GetImageFromMagickRegistry(const char *name,ssize_t *id,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o name: the name of the image to retrieve from the registry.
%
% o id: the registry id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GetImageFromMagickRegistry(const char *name,ssize_t *id,
ExceptionInfo *exception)
{
*id=0L;
return((Image *) GetImageRegistry(ImageRegistryType,name,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMagickRegistry() gets a blob from the registry as defined by the id. If
% the blob that matches the id is not found, NULL is returned.
%
% The format of the GetMagickRegistry method is:
%
% void *GetMagickRegistry(const ssize_t id,RegistryType *type,
% size_t *length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o id: the registry id.
%
% o type: the registry type.
%
% o length: the blob length in number of bytes.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetMagickRegistry(const ssize_t id,RegistryType *type,
size_t *length,ExceptionInfo *exception)
{
char
key[MaxTextExtent];
void
*blob;
*type=UndefinedRegistryType;
*length=0;
(void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id);
blob=(void *) GetImageRegistry(ImageRegistryType,key,exception);
if (blob != (void *) NULL)
return(blob);
blob=(void *) GetImageRegistry(ImageInfoRegistryType,key,exception);
if (blob != (void *) NULL)
return(blob);
return((void *) GetImageRegistry(UndefinedRegistryType,key,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t M a g i c k T o k e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMagickToken() gets a token from the token stream. A token is defined as
% a sequence of characters delimited by whitespace (e.g. clip-path), a
% sequence delimited with quotes (e.g. "Quote me"), or a sequence enclosed in
% parentheses (e.g. rgb(0,0,0)). GetMagickToken() also recognizes these
% separator characters: ':', '=', ',', and ';'.
%
% The format of the GetMagickToken method is:
%
% void GetMagickToken(const char *start,const char **end,char *token)
%
% A description of each parameter follows:
%
% o start: the start of the token sequence.
%
% o end: point to the end of the token sequence.
%
% o token: copy the token to this buffer.
%
*/
MagickExport void GetMagickToken(const char *start,const char **end,char *token)
{
GetNextToken(start,end,~0UL,token);
}
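/*
  A usage sketch; each call copies one token into the buffer and advances p
  past it:

    char
      token[MaxTextExtent];

    const char
      *p;

    p="rgb(0,0,0) gradient";
    GetMagickToken(p,&p,token);    token is now "rgb(0,0,0)"
*/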
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageGeometry() returns a region as defined by the geometry string with
% respect to the image and its gravity.
%
% Deprecated, replace with:
%
% if (size_to_fit != MagickFalse)
% ParseRegionGeometry(image,geometry,region_info,&image->exception); else
% ParsePageGeometry(image,geometry,region_info,&image->exception);
%
% The format of the GetImageGeometry method is:
%
% int GetImageGeometry(Image *image,const char *geometry,
% const unsigned int size_to_fit,RectangleInfo *region_info)
%
% A description of each parameter follows:
%
% o flags: Method GetImageGeometry returns a bitmask that indicates
% which of the four values were located in the geometry string.
%
% o geometry: The geometry (e.g. 100x100+10+10).
%
% o size_to_fit: A value other than 0 means to scale the region so it
% fits within the specified width and height.
%
% o region_info: the region as defined by the geometry string with
% respect to the image and its gravity.
%
*/
MagickExport int GetImageGeometry(Image *image,const char *geometry,
const unsigned int size_to_fit,RectangleInfo *region_info)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.4");
if (size_to_fit != MagickFalse)
return((int) ParseRegionGeometry(image,geometry,region_info,&image->exception));
return((int) ParsePageGeometry(image,geometry,region_info,&image->exception));
}
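/*
  A usage sketch; image is hypothetical, and size_to_fit=1 requests
  scale-to-fit semantics:

    RectangleInfo
      region;

    int
      flags;

    flags=GetImageGeometry(image,"100x100+10+10",1,&region);
*/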
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageList() returns an image at the specified position in the list.
%
% Deprecated, replace with:
%
% CloneImage(GetImageFromList(images,(ssize_t) offset),0,0,MagickTrue,
% exception);
%
% The format of the GetImageList method is:
%
% Image *GetImageList(const Image *images,const ssize_t offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o offset: the position within the list.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GetImageList(const Image *images,const ssize_t offset,
ExceptionInfo *exception)
{
Image
*image;
if (images->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
image=CloneImage(GetImageFromList(images,(ssize_t) offset),0,0,MagickTrue,
exception);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e L i s t I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageListIndex() returns the position in the list of the specified
% image.
%
% Deprecated, replace with:
%
% GetImageIndexInList(images);
%
% The format of the GetImageListIndex method is:
%
% ssize_t GetImageListIndex(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport ssize_t GetImageListIndex(const Image *images)
{
if (images->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
return(GetImageIndexInList(images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e L i s t S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageListSize() returns the number of images in the list.
%
% Deprecated, replace with:
%
% GetImageListLength(images);
%
% The format of the GetImageListSize method is:
%
% size_t GetImageListSize(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport size_t GetImageListSize(const Image *images)
{
if (images->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
return(GetImageListLength(images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a PixelPacket array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in RAM, or in a memory-mapped file. The returned pointer
% should *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or if the storage class is
% PseudoClass, call GetAuthenticIndexQueue() after invoking GetImagePixels()
% to obtain the black color component or colormap indexes (of type IndexPacket)
% corresponding to the region. Once the PixelPacket (and/or IndexPacket)
% array has been updated, the changes must be saved back to the underlying
% image using SyncAuthenticPixels() or they may be lost.
%
% Deprecated, replace with:
%
% GetAuthenticPixels(image,x,y,columns,rows,&image->exception);
%
% The format of the GetImagePixels() method is:
%
% PixelPacket *GetImagePixels(Image *image,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *GetImagePixels(Image *image,const ssize_t x,const ssize_t y,
const size_t columns,const size_t rows)
{
return(GetAuthenticPixels(image,x,y,columns,rows,&image->exception));
}
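/*
  A sketch of the read/modify/sync cycle described above; image and y are
  hypothetical:

    register ssize_t
      x;

    PixelPacket
      *q;

    q=GetImagePixels(image,0,y,image->columns,1);
    if (q != (PixelPacket *) NULL)
      {
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x));
        (void) SyncAuthenticPixels(image,&image->exception);
      }
*/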
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetIndexes() returns the black channel or the colormap indexes associated
% with the last call to QueueAuthenticPixels() or GetAuthenticPixels(). NULL is
% returned if the black channel or colormap indexes are not available.
%
% Deprecated, replace with:
%
% GetAuthenticIndexQueue(image);
%
% The format of the GetIndexes() method is:
%
% IndexPacket *GetIndexes(const Image *image)
%
% A description of each parameter follows:
%
% o indexes: GetIndexes() returns the indexes associated with the last
% call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% o image: the image.
%
*/
MagickExport IndexPacket *GetIndexes(const Image *image)
{
return(GetAuthenticIndexQueue(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t M a g i c k G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMagickGeometry() is similar to GetGeometry() except the returned
% geometry is modified as determined by the meta characters: %, !, <, >,
% and ~.
%
% Deprecated, replace with:
%
% ParseMetaGeometry(geometry,x,y,width,height);
%
% The format of the GetMagickGeometry method is:
%
% unsigned int GetMagickGeometry(const char *geometry,ssize_t *x,ssize_t *y,
% size_t *width,size_t *height)
%
% A description of each parameter follows:
%
% o geometry: Specifies a character string representing the geometry
% specification.
%
% o x,y: A pointer to an integer. The x and y offset as determined by
% the geometry specification is returned here.
%
% o width,height: A pointer to an unsigned integer. The width and height
% as determined by the geometry specification is returned here.
%
*/
MagickExport unsigned int GetMagickGeometry(const char *geometry,ssize_t *x,
ssize_t *y,size_t *width,size_t *height)
{
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.3");
return(ParseMetaGeometry(geometry,x,y,width,height));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImage() returns the next image in a list.
%
% Deprecated, replace with:
%
% GetNextImageInList(images);
%
% The format of the GetNextImage method is:
%
% Image *GetNextImage(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport Image *GetNextImage(const Image *images)
{
if (images->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
return(GetNextImageInList(images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageAttribute() gets the next image attribute.
%
% Deprecated, replace with:
%
% const char *property;
% property=GetNextImageProperty(image);
% if (property != (const char *) NULL)
% GetImageAttribute(image,property);
%
% The format of the GetNextImageAttribute method is:
%
% const ImageAttribute *GetNextImageAttribute(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const ImageAttribute *GetNextImageAttribute(const Image *image)
{
const char
*property;
property=GetNextImageProperty(image);
if (property == (const char *) NULL)
return((const ImageAttribute *) NULL);
return(GetImageAttribute(image,property));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N u m b e r S c e n e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNumberScenes() returns the number of images in the list.
%
% Deprecated, replace with:
%
% GetImageListLength(image);
%
% The format of the GetNumberScenes method is:
%
% unsigned int GetNumberScenes(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport unsigned int GetNumberScenes(const Image *image)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
return((unsigned int) GetImageListLength(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOnePixel() returns a single pixel at the specified (x,y) location.
% The image background color is returned if an error occurs.
%
% Deprecated, replace with:
%
% GetOneAuthenticPixel(image,x,y,&pixel,&image->exception);
%
% The format of the GetOnePixel() method is:
%
% PixelPacket GetOnePixel(Image *image,const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
*/
MagickExport PixelPacket GetOnePixel(Image *image,const ssize_t x,const ssize_t y)
{
PixelPacket
pixel;
(void) GetOneAuthenticPixel(image,x,y,&pixel,&image->exception);
return(pixel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixels() returns the pixels associated with the last call to
% QueueAuthenticPixels() or GetAuthenticPixels().
%
% Deprecated, replace with:
%
% GetAuthenticPixelQueue(image);
%
% The format of the GetPixels() method is:
%
% PixelPacket *GetPixels(const Image *image)
%
% A description of each parameter follows:
%
% o pixels: GetPixels() returns the pixels associated with the last call
% to QueueAuthenticPixels() or GetAuthenticPixels().
%
% o image: the image.
%
*/
MagickExport PixelPacket *GetPixels(const Image *image)
{
return(GetAuthenticPixelQueue(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P r e v i o u s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPreviousImage() returns the previous image in a list.
%
% Deprecated, replace with:
%
% GetPreviousImageInList(images);
%
% The format of the GetPreviousImage method is:
%
% Image *GetPreviousImage(const Image *images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport Image *GetPreviousImage(const Image *images)
{
if (images->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
return(GetPreviousImageInList(images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H S L T r a n s f o r m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HSLTransform() converts a (hue, saturation, lightness) to a (red, green,
% blue) triple.
%
% The format of the HSLTransformImage method is:
%
% void HSLTransform(const double hue,const double saturation,
% const double lightness,Quantum *red,Quantum *green,Quantum *blue)
%
% A description of each parameter follows:
%
% o hue, saturation, lightness: A double value representing a
% component of the HSL color space.
%
% o red, green, blue: A pointer to a pixel component of type Quantum.
%
*/
static inline MagickRealType HueToRGB(MagickRealType m1,MagickRealType m2,
MagickRealType hue)
{
if (hue < 0.0)
hue+=1.0;
if (hue > 1.0)
hue-=1.0;
if ((6.0*hue) < 1.0)
return(m1+6.0*(m2-m1)*hue);
if ((2.0*hue) < 1.0)
return(m2);
if ((3.0*hue) < 2.0)
return(m1+6.0*(m2-m1)*(2.0/3.0-hue));
return(m1);
}
MagickExport void HSLTransform(const double hue,const double saturation,
const double lightness,Quantum *red,Quantum *green,Quantum *blue)
{
MagickRealType
b,
g,
r,
m1,
m2;
/*
Convert HSL to RGB colorspace.
*/
assert(red != (Quantum *) NULL);
assert(green != (Quantum *) NULL);
assert(blue != (Quantum *) NULL);
if (lightness <= 0.5)
m2=lightness*(saturation+1.0);
else
m2=lightness+saturation-lightness*saturation;
m1=2.0*lightness-m2;
r=HueToRGB(m1,m2,hue+1.0/3.0);
g=HueToRGB(m1,m2,hue);
b=HueToRGB(m1,m2,hue-1.0/3.0);
*red=ClampToQuantum((MagickRealType) QuantumRange*r);
*green=ClampToQuantum((MagickRealType) QuantumRange*g);
*blue=ClampToQuantum((MagickRealType) QuantumRange*b);
}
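/*
  A usage sketch: hue=1/3, saturation=1, lightness=1/2 yields pure green
  (m2=1 and m1=0 in the code above):

    Quantum
      blue,
      green,
      red;

    HSLTransform(1.0/3.0,1.0,0.5,&red,&green,&blue);
    assert(green == QuantumRange);
*/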
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i t y A f f i n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentityAffine() initializes the affine transform to the identity matrix.
%
% The format of the IdentityAffine method is:
%
% IdentityAffine(AffineMatrix *affine)
%
% A description of each parameter follows:
%
% o affine: A pointer the affine transform of type AffineMatrix.
%
*/
MagickExport void IdentityAffine(AffineMatrix *affine)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
assert(affine != (AffineMatrix *) NULL);
(void) memset(affine,0,sizeof(AffineMatrix));
affine->sx=1.0;
affine->sy=1.0;
}
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m a g e T o H B i t m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImageToHBITMAP() creates a Windows HBITMAP from an image.
%
% The format of the ImageToHBITMAP method is:
%
% void *ImageToHBITMAP(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to convert.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *ImageToHBITMAP(Image *image,ExceptionInfo *exception)
{
BITMAP
bitmap;
HANDLE
bitmap_bitsH;
HBITMAP
bitmapH;
register ssize_t
x;
register const PixelPacket
*p;
register RGBQUAD
*q;
RGBQUAD
*bitmap_bits;
size_t
length;
ssize_t
y;
(void) memset(&bitmap,0,sizeof(bitmap));
bitmap.bmType=0;
bitmap.bmWidth=(LONG) image->columns;
bitmap.bmHeight=(LONG) image->rows;
bitmap.bmWidthBytes=4*bitmap.bmWidth;
bitmap.bmPlanes=1;
bitmap.bmBitsPixel=32;
bitmap.bmBits=NULL;
length=bitmap.bmWidthBytes*bitmap.bmHeight;
bitmap_bitsH=(HANDLE) GlobalAlloc(GMEM_MOVEABLE | GMEM_DDESHARE,length);
if (bitmap_bitsH == NULL)
{
char
*message;
message=GetExceptionMessage(errno);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",message);
message=DestroyString(message);
return(NULL);
}
bitmap_bits=(RGBQUAD *) GlobalLock((HGLOBAL) bitmap_bitsH);
q=bitmap_bits;
if (bitmap.bmBits == NULL)
bitmap.bmBits=bitmap_bits;
(void) SetImageColorspace(image,sRGBColorspace);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
q->rgbRed=ScaleQuantumToChar(GetPixelRed(p));
q->rgbGreen=ScaleQuantumToChar(GetPixelGreen(p));
q->rgbBlue=ScaleQuantumToChar(GetPixelBlue(p));
q->rgbReserved=0;
p++;
q++;
}
}
bitmap.bmBits=bitmap_bits;
bitmapH=CreateBitmapIndirect(&bitmap);
if (bitmapH == NULL)
{
char
*message;
message=GetExceptionMessage(errno);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",message);
message=DestroyString(message);
}
GlobalUnlock((HGLOBAL) bitmap_bitsH);
GlobalFree((HGLOBAL) bitmap_bitsH);
return((void *) bitmapH);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n i t i a l i z e M a g i c k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeMagick() initializes the ImageMagick environment.
%
% Deprecated, replace with:
%
% MagickCoreGenesis(path,MagickFalse);
%
% The format of the InitializeMagick function is:
%
% InitializeMagick(const char *path)
%
% A description of each parameter follows:
%
% o path: the execution path of the current ImageMagick client.
%
*/
MagickExport void InitializeMagick(const char *path)
{
MagickCoreGenesis(path,MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t e P i x e l C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolatePixelColor() applies bi-linear or tri-linear interpolation
% between a pixel and its neighbors.
%
% The format of the InterpolatePixelColor method is:
%
% MagickPixelPacket InterpolatePixelColor(const Image *image,
% CacheView *image_view,const InterpolatePixelMethod method,const double x,
% const double y,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o image_view: the image cache view.
%
% o method: the pixel color interpolation method.
%
% o x,y: A double representing the current (x,y) position of the pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void BicubicInterpolate(const MagickPixelPacket *pixels,const double dx,
MagickPixelPacket *pixel)
{
MagickRealType
dx2,
p,
q,
r,
s;
dx2=dx*dx;
p=(pixels[3].red-pixels[2].red)-(pixels[0].red-pixels[1].red);
q=(pixels[0].red-pixels[1].red)-p;
r=pixels[2].red-pixels[0].red;
s=pixels[1].red;
pixel->red=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
p=(pixels[3].green-pixels[2].green)-(pixels[0].green-pixels[1].green);
q=(pixels[0].green-pixels[1].green)-p;
r=pixels[2].green-pixels[0].green;
s=pixels[1].green;
pixel->green=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
p=(pixels[3].blue-pixels[2].blue)-(pixels[0].blue-pixels[1].blue);
q=(pixels[0].blue-pixels[1].blue)-p;
r=pixels[2].blue-pixels[0].blue;
s=pixels[1].blue;
pixel->blue=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
p=(pixels[3].opacity-pixels[2].opacity)-(pixels[0].opacity-pixels[1].opacity);
q=(pixels[0].opacity-pixels[1].opacity)-p;
r=pixels[2].opacity-pixels[0].opacity;
s=pixels[1].opacity;
pixel->opacity=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
if (pixel->colorspace == CMYKColorspace)
{
p=(pixels[3].index-pixels[2].index)-(pixels[0].index-pixels[1].index);
q=(pixels[0].index-pixels[1].index)-p;
r=pixels[2].index-pixels[0].index;
s=pixels[1].index;
pixel->index=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
}
}
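/*
  CubicWeightingFunction() below evaluates the uniform cubic B-spline kernel
  B(x)=((x+2)+^3-4*(x+1)+^3+6*(x)+^3-4*(x-1)+^3)/6, where (t)+=max(t,0); it
  is nonzero only on (-2,2), so a 4x4 pixel neighborhood suffices for the
  spline interpolation case.
*/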
static inline MagickRealType CubicWeightingFunction(const MagickRealType x)
{
MagickRealType
alpha,
gamma;
alpha=MagickMax(x+2.0,0.0);
gamma=1.0*alpha*alpha*alpha;
alpha=MagickMax(x+1.0,0.0);
gamma-=4.0*alpha*alpha*alpha;
alpha=MagickMax(x+0.0,0.0);
gamma+=6.0*alpha*alpha*alpha;
alpha=MagickMax(x-1.0,0.0);
gamma-=4.0*alpha*alpha*alpha;
return(gamma/6.0);
}
static inline double MeshInterpolate(const PointInfo *delta,const double p,
const double x,const double y)
{
return(delta->x*x+delta->y*y+(1.0-delta->x-delta->y)*p);
}
static inline ssize_t NearestNeighbor(MagickRealType x)
{
if (x >= 0.0)
return((ssize_t) (x+0.5));
return((ssize_t) (x-0.5));
}
MagickExport MagickPixelPacket InterpolatePixelColor(const Image *image,
CacheView *image_view,const InterpolatePixelMethod method,const double x,
const double y,ExceptionInfo *exception)
{
MagickPixelPacket
pixel;
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register ssize_t
i;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image_view != (CacheView *) NULL);
GetMagickPixelPacket(image,&pixel);
switch (method)
{
case AverageInterpolatePixel:
{
double
gamma;
MagickPixelPacket
pixels[16];
MagickRealType
alpha[16];
p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t)
floor(y)-1,4,4,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (i=0; i < 16L; i++)
{
GetMagickPixelPacket(image,pixels+i);
SetMagickPixelPacket(image,p,indexes+i,pixels+i);
alpha[i]=1.0;
if (image->matte != MagickFalse)
{
alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
pixels[i].red*=alpha[i];
pixels[i].green*=alpha[i];
pixels[i].blue*=alpha[i];
if (image->colorspace == CMYKColorspace)
pixels[i].index*=alpha[i];
}
gamma=alpha[i];
gamma=PerceptibleReciprocal(gamma);
pixel.red+=gamma*0.0625*pixels[i].red;
pixel.green+=gamma*0.0625*pixels[i].green;
pixel.blue+=gamma*0.0625*pixels[i].blue;
pixel.opacity+=0.0625*pixels[i].opacity;
if (image->colorspace == CMYKColorspace)
pixel.index+=gamma*0.0625*pixels[i].index;
p++;
}
break;
}
case BicubicInterpolatePixel:
{
MagickPixelPacket
pixels[16],
u[4];
MagickRealType
alpha[16];
PointInfo
delta;
p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t)
floor(y)-1,4,4,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (i=0; i < 4L; i++)
GetMagickPixelPacket(image,u+i);
for (i=0; i < 16L; i++)
{
GetMagickPixelPacket(image,pixels+i);
SetMagickPixelPacket(image,p,indexes+i,pixels+i);
alpha[i]=1.0;
if (image->matte != MagickFalse)
{
alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
pixels[i].red*=alpha[i];
pixels[i].green*=alpha[i];
pixels[i].blue*=alpha[i];
if (image->colorspace == CMYKColorspace)
pixels[i].index*=alpha[i];
}
p++;
}
delta.x=x-floor(x);
for (i=0; i < 4L; i++)
{
GetMagickPixelPacket(image,pixels+4*i);
BicubicInterpolate(pixels+4*i,delta.x,u+i);
}
delta.y=y-floor(y);
BicubicInterpolate(u,delta.y,&pixel);
break;
}
case BilinearInterpolatePixel:
default:
{
double
gamma;
MagickPixelPacket
pixels[16];
MagickRealType
alpha[16];
PointInfo
delta;
p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t)
floor(y),2,2,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (i=0; i < 4L; i++)
{
GetMagickPixelPacket(image,pixels+i);
SetMagickPixelPacket(image,p,indexes+i,pixels+i);
alpha[i]=1.0;
if (image->matte != MagickFalse)
{
alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
pixels[i].red*=alpha[i];
pixels[i].green*=alpha[i];
pixels[i].blue*=alpha[i];
if (image->colorspace == CMYKColorspace)
pixels[i].index*=alpha[i];
}
p++;
}
delta.x=x-floor(x);
delta.y=y-floor(y);
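/*
  Standard bilinear weights over the 2x2 neighborhood: (1-dx)(1-dy),
  dx(1-dy), (1-dx)dy, and dx*dy. The color channels were premultiplied by
  alpha above, so gamma (the reciprocal of the interpolated alpha)
  renormalizes them.
*/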
gamma=(((1.0-delta.y)*((1.0-delta.x)*alpha[0]+delta.x*alpha[1])+delta.y*
((1.0-delta.x)*alpha[2]+delta.x*alpha[3])));
gamma=PerceptibleReciprocal(gamma);
pixel.red=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].red+delta.x*
pixels[1].red)+delta.y*((1.0-delta.x)*pixels[2].red+delta.x*
pixels[3].red));
pixel.green=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].green+delta.x*
pixels[1].green)+delta.y*((1.0-delta.x)*pixels[2].green+
delta.x*pixels[3].green));
pixel.blue=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].blue+delta.x*
pixels[1].blue)+delta.y*((1.0-delta.x)*pixels[2].blue+delta.x*
pixels[3].blue));
pixel.opacity=((1.0-delta.y)*((1.0-delta.x)*pixels[0].opacity+delta.x*
pixels[1].opacity)+delta.y*((1.0-delta.x)*pixels[2].opacity+delta.x*
pixels[3].opacity));
if (image->colorspace == CMYKColorspace)
pixel.index=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].index+delta.x*
pixels[1].index)+delta.y*((1.0-delta.x)*pixels[2].index+delta.x*
pixels[3].index));
break;
}
case FilterInterpolatePixel:
{
Image
*excerpt_image,
*filter_image;
MagickPixelPacket
pixels[1];
RectangleInfo
geometry;
geometry.width=4L;
geometry.height=4L;
geometry.x=(ssize_t) floor(x)-1L;
geometry.y=(ssize_t) floor(y)-1L;
excerpt_image=ExcerptImage(image,&geometry,exception);
if (excerpt_image == (Image *) NULL)
break;
filter_image=ResizeImage(excerpt_image,1,1,image->filter,image->blur,
exception);
excerpt_image=DestroyImage(excerpt_image);
if (filter_image == (Image *) NULL)
break;
p=GetVirtualPixels(filter_image,0,0,1,1,exception);
if (p == (const PixelPacket *) NULL)
{
filter_image=DestroyImage(filter_image);
break;
}
indexes=GetVirtualIndexQueue(filter_image);
GetMagickPixelPacket(image,pixels);
SetMagickPixelPacket(image,p,indexes,&pixel);
filter_image=DestroyImage(filter_image);
break;
}
case IntegerInterpolatePixel:
{
MagickPixelPacket
pixels[1];
p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t)
floor(y),1,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
GetMagickPixelPacket(image,pixels);
SetMagickPixelPacket(image,p,indexes,&pixel);
break;
}
case MeshInterpolatePixel:
{
double
gamma;
MagickPixelPacket
pixels[4];
MagickRealType
alpha[4];
PointInfo
delta,
luminance;
p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t)
floor(y),2,2,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (i=0; i < 4L; i++)
{
GetMagickPixelPacket(image,pixels+i);
SetMagickPixelPacket(image,p,indexes+i,pixels+i);
alpha[i]=1.0;
if (image->matte != MagickFalse)
{
alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p));
pixels[i].red*=alpha[i];
pixels[i].green*=alpha[i];
pixels[i].blue*=alpha[i];
if (image->colorspace == CMYKColorspace)
pixels[i].index*=alpha[i];
}
p++;
}
delta.x=x-floor(x);
delta.y=y-floor(y);
luminance.x=MagickPixelLuma(pixels+0)-MagickPixelLuma(pixels+3);
luminance.y=MagickPixelLuma(pixels+1)-MagickPixelLuma(pixels+2);
if (fabs(luminance.x) < fabs(luminance.y))
{
/*
Diagonal 0-3 NW-SE.
*/
if (delta.x <= delta.y)
{
/*
Bottom-left triangle (pixel:2, diagonal: 0-3).
*/
delta.y=1.0-delta.y;
gamma=MeshInterpolate(&delta,alpha[2],alpha[3],alpha[0]);
gamma=PerceptibleReciprocal(gamma);
pixel.red=gamma*MeshInterpolate(&delta,pixels[2].red,
pixels[3].red,pixels[0].red);
pixel.green=gamma*MeshInterpolate(&delta,pixels[2].green,
pixels[3].green,pixels[0].green);
pixel.blue=gamma*MeshInterpolate(&delta,pixels[2].blue,
pixels[3].blue,pixels[0].blue);
pixel.opacity=gamma*MeshInterpolate(&delta,pixels[2].opacity,
pixels[3].opacity,pixels[0].opacity);
if (image->colorspace == CMYKColorspace)
pixel.index=gamma*MeshInterpolate(&delta,pixels[2].index,
pixels[3].index,pixels[0].index);
}
else
{
/*
Top-right triangle (pixel:1, diagonal: 0-3).
*/
delta.x=1.0-delta.x;
gamma=MeshInterpolate(&delta,alpha[1],alpha[0],alpha[3]);
gamma=PerceptibleReciprocal(gamma);
pixel.red=gamma*MeshInterpolate(&delta,pixels[1].red,
pixels[0].red,pixels[3].red);
pixel.green=gamma*MeshInterpolate(&delta,pixels[1].green,
pixels[0].green,pixels[3].green);
pixel.blue=gamma*MeshInterpolate(&delta,pixels[1].blue,
pixels[0].blue,pixels[3].blue);
pixel.opacity=gamma*MeshInterpolate(&delta,pixels[1].opacity,
pixels[0].opacity,pixels[3].opacity);
if (image->colorspace == CMYKColorspace)
pixel.index=gamma*MeshInterpolate(&delta,pixels[1].index,
pixels[0].index,pixels[3].index);
}
}
else
{
/*
Diagonal 1-2 NE-SW.
*/
if (delta.x <= (1.0-delta.y))
{
/*
Top-left triangle (pixel 0, diagonal: 1-2).
*/
gamma=MeshInterpolate(&delta,alpha[0],alpha[1],alpha[2]);
gamma=PerceptibleReciprocal(gamma);
pixel.red=gamma*MeshInterpolate(&delta,pixels[0].red,
pixels[1].red,pixels[2].red);
pixel.green=gamma*MeshInterpolate(&delta,pixels[0].green,
pixels[1].green,pixels[2].green);
pixel.blue=gamma*MeshInterpolate(&delta,pixels[0].blue,
pixels[1].blue,pixels[2].blue);
pixel.opacity=gamma*MeshInterpolate(&delta,pixels[0].opacity,
pixels[1].opacity,pixels[2].opacity);
if (image->colorspace == CMYKColorspace)
pixel.index=gamma*MeshInterpolate(&delta,pixels[0].index,
pixels[1].index,pixels[2].index);
}
else
{
/*
Bottom-right triangle (pixel: 3, diagonal: 1-2).
*/
delta.x=1.0-delta.x;
delta.y=1.0-delta.y;
gamma=MeshInterpolate(&delta,alpha[3],alpha[2],alpha[1]);
gamma=PerceptibleReciprocal(gamma);
pixel.red=gamma*MeshInterpolate(&delta,pixels[3].red,
pixels[2].red,pixels[1].red);
pixel.green=gamma*MeshInterpolate(&delta,pixels[3].green,
pixels[2].green,pixels[1].green);
pixel.blue=gamma*MeshInterpolate(&delta,pixels[3].blue,
pixels[2].blue,pixels[1].blue);
pixel.opacity=gamma*MeshInterpolate(&delta,pixels[3].opacity,
pixels[2].opacity,pixels[1].opacity);
if (image->colorspace == CMYKColorspace)
pixel.index=gamma*MeshInterpolate(&delta,pixels[3].index,
pixels[2].index,pixels[1].index);
}
}
break;
}
case NearestNeighborInterpolatePixel:
{
MagickPixelPacket
pixels[1];
p=GetCacheViewVirtualPixels(image_view,NearestNeighbor(x),
NearestNeighbor(y),1,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
GetMagickPixelPacket(image,pixels);
SetMagickPixelPacket(image,p,indexes,&pixel);
break;
}
case SplineInterpolatePixel:
{
double
gamma;
MagickPixelPacket
pixels[16];
MagickRealType
alpha[16],
dx,
dy;
PointInfo
delta;
ssize_t
j,
n;
p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t)
floor(y)-1,4,4,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
n=0;
delta.x=x-floor(x);
delta.y=y-floor(y);
for (i=(-1); i < 3L; i++)
{
dy=CubicWeightingFunction((MagickRealType) i-delta.y);
for (j=(-1); j < 3L; j++)
{
GetMagickPixelPacket(image,pixels+n);
SetMagickPixelPacket(image,p,indexes+n,pixels+n);
alpha[n]=1.0;
if (image->matte != MagickFalse)
{
alpha[n]=QuantumScale*((MagickRealType)
GetPixelAlpha(p));
pixels[n].red*=alpha[n];
pixels[n].green*=alpha[n];
pixels[n].blue*=alpha[n];
if (image->colorspace == CMYKColorspace)
pixels[n].index*=alpha[n];
}
dx=CubicWeightingFunction(delta.x-(MagickRealType) j);
gamma=alpha[n];
gamma=PerceptibleReciprocal(gamma);
pixel.red+=gamma*dx*dy*pixels[n].red;
pixel.green+=gamma*dx*dy*pixels[n].green;
pixel.blue+=gamma*dx*dy*pixels[n].blue;
if (image->matte != MagickFalse)
pixel.opacity+=dx*dy*pixels[n].opacity;
if (image->colorspace == CMYKColorspace)
pixel.index+=gamma*dx*dy*pixels[n].index;
n++;
p++;
}
}
break;
}
}
return(pixel);
}
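/*
  A usage sketch; image, x, y, and exception are hypothetical:

    CacheView
      *image_view;

    MagickPixelPacket
      pixel;

    image_view=AcquireVirtualCacheView(image,exception);
    pixel=InterpolatePixelColor(image,image_view,BilinearInterpolatePixel,
      x+0.5,y+0.5,exception);
    image_view=DestroyCacheView(image_view);
*/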
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e A t t r i b u t e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageAttributes() replaces any embedded formatting characters with
% the appropriate image attribute and returns the translated text.
%
% Deprecated, replace with:
%
% InterpretImageProperties(image_info,image,embed_text);
%
% The format of the InterpretImageAttributes method is:
%
% char *InterpretImageAttributes(const ImageInfo *image_info,Image *image,
% const char *embed_text)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o embed_text: the address of a character string containing the embedded
% formatting characters.
%
*/
MagickExport char *InterpretImageAttributes(const ImageInfo *image_info,
Image *image,const char *embed_text)
{
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1");
return(InterpretImageProperties(image_info,image,embed_text));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v e r s e s R G B C o m p a n d o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InversesRGBCompandor() removes the gamma function from an sRGB pixel.
%
% The format of the InversesRGBCompandor method is:
%
% MagickRealType InversesRGBCompandor(const MagickRealType pixel)
%
% A description of each parameter follows:
%
% o pixel: the pixel.
%
*/
MagickExport MagickRealType InversesRGBCompandor(const MagickRealType pixel)
{
if (pixel <= (0.0404482362771076*QuantumRange))
return(pixel/12.92);
return(QuantumRange*pow((QuantumScale*pixel+0.055)/1.055,2.4));
}
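/*
A worked example of the companding math above, assuming a Q16 build
(QuantumRange == 65535): the mid-scale value pixel == 0.5*QuantumRange
exceeds the linear-segment cutoff 0.0404482362771076*QuantumRange, so the
power branch applies:

QuantumRange*pow((0.5+0.055)/1.055,2.4) ~= 0.214*QuantumRange ~= 14026

matching the familiar result that sRGB mid-grey is about 21% linear light.
*/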
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M a g i c k I n s t a n t i a t e d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMagickInstantiated() returns MagickTrue if the ImageMagick environment
% is currently instantiated: MagickCoreGenesis() has been called but
% MagickCoreTerminus() has not.
%
% The format of the IsMagickInstantiated method is:
%
% MagickBooleanType IsMagickInstantiated(void)
%
*/
MagickExport MagickBooleanType IsMagickInstantiated(void)
{
return(IsMagickCoreInstantiated());
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I s S u b i m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsSubimage() returns MagickTrue if the geometry is a valid subimage
% specification (e.g. [1], [1-9], [1,7,4]).
%
% The format of the IsSubimage method is:
%
% unsigned int IsSubimage(const char *geometry,const unsigned int pedantic)
%
% A description of each parameter follows:
%
% o geometry: This string is the geometry specification.
%
% o pedantic: A value other than 0 invokes a more restrictive set of
% conditions for a valid specification (e.g. [1], [1-4], [4-1]).
%
*/
MagickExport unsigned int IsSubimage(const char *geometry,
const unsigned int pedantic)
{
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
if (geometry == (const char *) NULL)
return(MagickFalse);
if ((strchr(geometry,'x') != (char *) NULL) ||
(strchr(geometry,'X') != (char *) NULL))
return(MagickFalse);
if ((pedantic != MagickFalse) && (strchr(geometry,',') != (char *) NULL))
return(MagickFalse);
return(MagickTrue);
}
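/*
Examples of the checks above: "1", "1-9", and "1,7,4" are accepted as
subimage specifications; "100x100" is rejected because it contains an 'x';
and with pedantic != 0, "1,7,4" is rejected as well because it contains a
comma.
*/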
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColors() will map the given color to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel by channel
% basis, as per LevelImage(). The given colors allow you to specify different
% level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true the image values will be modified in the
% reverse direction. That is, any existing "black" and "white" colors in the
% image will become the color values given, with all other values compressed
% appropriately. This effectively maps a greyscale gradient into the given
% color gradient.
%
% Deprecated, replace with:
%
% LevelColorsImageChannel(image,channel,black_color,white_color,invert);
%
% The format of the LevelImageColors method is:
%
% MagickBooleanType LevelImageColors(Image *image,const ChannelType channel,
% const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
% const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_color: The color to map black to/from.
%
% o white_color: The color to map white to/from.
%
% o invert: if true, map the image colors to the given range (levelize),
% rather than from it (level).
%
*/
MagickExport MagickBooleanType LevelImageColors(Image *image,
const ChannelType channel,const MagickPixelPacket *black_color,
const MagickPixelPacket *white_color,const MagickBooleanType invert)
{
return(LevelColorsImageChannel(image,channel,black_color,white_color,invert));
}
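/*
A minimal usage sketch (hypothetical colors): stretch each channel so a
dark red maps to black and a pale cyan maps to white; the two
MagickPixelPacket arguments supply the per-channel level points:

MagickPixelPacket
black_color,
white_color;

GetMagickPixelPacket(image,&black_color);
GetMagickPixelPacket(image,&white_color);
(void) QueryMagickColor("#400000",&black_color,&image->exception);
(void) QueryMagickColor("#c0ffff",&white_color,&image->exception);
(void) LevelImageColors(image,DefaultChannels,&black_color,&white_color,
MagickFalse);
*/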
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i b e r a t e M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiberateMemory() frees memory that has already been allocated, and NULL's
% the pointer to it.
%
% The format of the LiberateMemory method is:
%
% void LiberateMemory(void **memory)
%
% A description of each parameter follows:
%
% o memory: A pointer to a block of memory to free for reuse.
%
*/
MagickExport void LiberateMemory(void **memory)
{
assert(memory != (void **) NULL);
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
if (*memory == (void *) NULL)
return;
free(*memory);
*memory=(void *) NULL;
}
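/*
Usage sketch: because LiberateMemory() NULLs the caller's pointer, a second
call on the same pointer sees NULL and returns immediately:

void
*buffer;

buffer=AcquireMagickMemory(1024);
LiberateMemory(&buffer);
LiberateMemory(&buffer);
*/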
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i b e r a t e S e m a p h o r e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiberateSemaphoreInfo() relinquishes a semaphore.
%
% Deprecated, replace with:
%
% UnlockSemaphoreInfo(*semaphore_info);
%
% The format of the LiberateSemaphoreInfo method is:
%
% void LiberateSemaphoreInfo(SemaphoreInfo **semaphore_info)
%
% A description of each parameter follows:
%
% o semaphore_info: Specifies a pointer to an SemaphoreInfo structure.
%
*/
MagickExport void LiberateSemaphoreInfo(SemaphoreInfo **semaphore_info)
{
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
UnlockSemaphoreInfo(*semaphore_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k I n c a r n a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickIncarnate() initializes the ImageMagick environment.
%
% Deprecated, replace with:
%
% MagickCoreGenesis(path,MagickFalse);
%
% The format of the MagickIncarnate function is:
%
% MagickIncarnate(const char *path)
%
% A description of each parameter follows:
%
% o path: the execution path of the current ImageMagick client.
%
*/
MagickExport void MagickIncarnate(const char *path)
{
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
MagickCoreGenesis(path,MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o n i t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMonitor() calls the monitor handler method with a text string that
% describes the task and a measure of completion. The method returns
% MagickTrue on success, or MagickFalse if an error is encountered, e.g.
% if there was a user interrupt.
%
% The format of the MagickMonitor method is:
%
% MagickBooleanType MagickMonitor(const char *text,
% const MagickOffsetType offset,const MagickSizeType span,
% void *client_data)
%
% A description of each parameter follows:
%
% o text: a string describing the task being monitored.
%
% o offset: the position relative to the span parameter which represents
% how much progress has been made toward completing a task.
%
% o span: the span relative to completing a task.
%
% o client_data: the client data.
%
*/
MagickExport MagickBooleanType MagickMonitor(const char *text,
const MagickOffsetType offset,const MagickSizeType span,
void *magick_unused(client_data))
{
ExceptionInfo
*exception;
MagickBooleanType
status;
magick_unreferenced(client_data);
assert(text != (const char *) NULL);
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",text);
ProcessPendingEvents(text);
status=MagickTrue;
exception=AcquireExceptionInfo();
if (monitor_handler != (MonitorHandler) NULL)
status=(*monitor_handler)(text,offset,span,exception);
exception=DestroyExceptionInfo(exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MapImage() replaces the colors of an image with the closest color from a
% reference image.
%
% Deprecated, replace with:
%
% QuantizeInfo quantize_info;
% GetQuantizeInfo(&quantize_info);
% quantize_info.dither=dither;
% RemapImage(&quantize_info,image,map_image);
%
% The format of the MapImage method is:
%
% MagickBooleanType MapImage(Image *image,const Image *map_image,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o map_image: reduce the colors of image to the set of colors represented
% by this image.
%
% o dither: Set this integer value to something other than zero to
% dither the mapped image.
%
*/
MagickExport MagickBooleanType MapImage(Image *image,const Image *map_image,
const MagickBooleanType dither)
{
QuantizeInfo
quantize_info;
/*
Initialize color cube.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(map_image != (Image *) NULL);
assert(map_image->signature == MagickCoreSignature);
GetQuantizeInfo(&quantize_info);
quantize_info.dither=dither;
return(RemapImage(&quantize_info,image,map_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MapImages() replaces the colors of a sequence of images with the closest
% color from a reference image.
%
% Deprecated, replace with:
%
% QuantizeInfo quantize_info;
% GetQuantizeInfo(&quantize_info);
% quantize_info.dither=dither;
% RemapImages(&quantize_info,images,map_image);
%
% The format of the MapImages method is:
%
% MagickBooleanType MapImages(Image *images,Image *map_image,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o images: Specifies a pointer to a set of Image structures.
%
% o map_image: reduce the colors of each image in the sequence to the set
% of colors represented by this image.
%
% o dither: Set this integer value to something other than zero to
% dither the quantized image.
%
*/
MagickExport MagickBooleanType MapImages(Image *images,const Image *map_image,
const MagickBooleanType dither)
{
QuantizeInfo
quantize_info;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
GetQuantizeInfo(&quantize_info);
quantize_info.dither=dither;
return(RemapImages(&quantize_info,images,map_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t t e F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatteFloodfillImage() changes the transparency value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod
% is specified, the transparency value is changed for any neighbor pixel
% that does not match the bordercolor member of image.
%
% By default target must match a particular pixel transparency exactly.
% However, in many cases two transparency values may differ by a
% small amount. The fuzz member of image defines how much tolerance is
% acceptable to consider two transparency values as the same. For example,
% set fuzz to 10 and the opacity values of 100 and 102 respectively are
% now interpreted as the same value for the purposes of the floodfill.
%
% The format of the MatteFloodfillImage method is:
%
% MagickBooleanType MatteFloodfillImage(Image *image,
% const PixelPacket target,const Quantum opacity,const ssize_t x_offset,
% const ssize_t y_offset,const PaintMethod method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o opacity: the level of transparency: 0 is fully opaque and QuantumRange is
% fully transparent.
%
% o x,y: the starting location of the operation.
%
% o method: Choose either FloodfillMethod or FillToBorderMethod.
%
*/
MagickExport MagickBooleanType MatteFloodfillImage(Image *image,
const PixelPacket target,const Quantum opacity,const ssize_t x_offset,
const ssize_t y_offset,const PaintMethod method)
{
Image
*floodplane_image;
MagickBooleanType
skip;
register SegmentInfo
*s;
SegmentInfo
*segment_stack;
ssize_t
offset,
start,
x,
x1,
x2,
y;
/*
Check boundary conditions.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
return(MagickFalse);
if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
if (floodplane_image == (Image *) NULL)
return(MagickFalse);
(void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
/*
Set floodfill color.
*/
segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize,
sizeof(*segment_stack));
if (segment_stack == (SegmentInfo *) NULL)
{
floodplane_image=DestroyImage(floodplane_image);
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/*
Push initial segment on stack.
*/
x=x_offset;
y=y_offset;
start=0;
s=segment_stack;
PushSegmentStack(y,x,x,1);
PushSegmentStack(y+1,x,x,-1);
while (s > segment_stack)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
/*
Pop segment off stack.
*/
s--;
x1=(ssize_t) s->x1;
x2=(ssize_t) s->x2;
offset=(ssize_t) s->y2;
y=(ssize_t) s->y1+offset;
/*
Recolor neighboring pixels.
*/
p=GetVirtualPixels(image,0,y,(size_t) (x1+1),1,&image->exception);
q=GetAuthenticPixels(floodplane_image,0,y,(size_t) (x1+1),1,
&image->exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
p+=x1;
q+=x1;
for (x=x1; x >= 0; x--)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
if (method == FloodfillMethod)
{
if (IsColorSimilar(image,p,&target) == MagickFalse)
break;
}
else
if (IsColorSimilar(image,p,&target) != MagickFalse)
break;
q->opacity=(Quantum) TransparentOpacity;
q--;
p--;
}
if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
break;
skip=x >= x1 ? MagickTrue : MagickFalse;
if (skip == MagickFalse)
{
start=x+1;
if (start < x1)
PushSegmentStack(y,start,x1-1,-offset);
x=x1+1;
}
do
{
if (skip == MagickFalse)
{
if (x < (ssize_t) image->columns)
{
p=GetVirtualPixels(image,x,y,image->columns-x,1,
&image->exception);
q=GetAuthenticPixels(floodplane_image,x,y,image->columns-x,1,
&image->exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for ( ; x < (ssize_t) image->columns; x++)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
if (method == FloodfillMethod)
{
if (IsColorSimilar(image,p,&target) == MagickFalse)
break;
}
else
if (IsColorSimilar(image,p,&target) != MagickFalse)
break;
q->opacity=(Quantum) TransparentOpacity;
q++;
p++;
}
if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
break;
}
PushSegmentStack(y,start,x-1,offset);
if (x > (x2+1))
PushSegmentStack(y,x2+1,x-1,-offset);
}
skip=MagickFalse;
x++;
if (x <= x2)
{
p=GetVirtualPixels(image,x,y,(size_t) (x2-x+1),1,
&image->exception);
q=GetAuthenticPixels(floodplane_image,x,y,(size_t) (x2-x+1),1,
&image->exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for ( ; x <= x2; x++)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
if (method == FloodfillMethod)
{
if (IsColorSimilar(image,p,&target) != MagickFalse)
break;
}
else
if (IsColorSimilar(image,p,&target) == MagickFalse)
break;
p++;
q++;
}
}
start=x;
} while (x <= x2);
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
/*
Tile fill color onto floodplane.
*/
p=GetVirtualPixels(floodplane_image,0,y,image->columns,1,
&image->exception);
q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelOpacity(p) != OpaqueOpacity)
q->opacity=opacity;
p++;
q++;
}
if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
break;
}
segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack);
floodplane_image=DestroyImage(floodplane_image);
return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
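/*
A minimal usage sketch (hypothetical coordinates): make every pixel
connected to (0,0) that matches its color, within image->fuzz, fully
transparent:

PixelPacket
target;

(void) GetOneVirtualPixel(image,0,0,&target,&image->exception);
(void) MatteFloodfillImage(image,target,(Quantum) TransparentOpacity,0,0,
FloodfillMethod);
*/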
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a x i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaximumImages() returns the maximum intensity of an image sequence.
%
% Deprecated, replace with:
%
% EvaluateImages(images,MaxEvaluateOperator,exception);
%
% The format of the MaximumImages method is:
%
% Image *MaximumImages(const Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MaximumImages(const Image *images,ExceptionInfo *exception)
{
return(EvaluateImages(images,MaxEvaluateOperator,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinimumImages() returns the minimum intensity of an image sequence.
%
% Deprecated, replace with:
%
% EvaluateImages(images,MinEvaluateOperator,exception);
%
% The format of the MinimumImages method is:
%
% Image *MinimumImages(const Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinimumImages(const Image *images,ExceptionInfo *exception)
{
return(EvaluateImages(images,MinEvaluateOperator,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M e d i a n F i l t e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MedianFilterImage() applies a digital filter that improves the quality
% of a noisy image. Each pixel is replaced by the median in a set of
% neighboring pixels as defined by radius.
%
% The algorithm was contributed by Mike Edmonds and implements an insertion
% sort for selecting median color-channel values. For more on this algorithm
% see "Skip Lists: A probabilistic Alternative to Balanced Trees" by William
% Pugh in the June 1990 of Communications of the ACM.
%
% The format of the MedianFilterImage method is:
%
% Image *MedianFilterImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MedianFilterImage(const Image *image,const double radius,
ExceptionInfo *exception)
{
Image
*median_image;
median_image=StatisticImage(image,MedianStatistic,(size_t) radius,(size_t)
radius,exception);
return(median_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModeImage() makes each pixel the 'predominant color' of the neighborhood
% of the specified radius.
%
% The format of the ModeImage method is:
%
% Image *ModeImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ModeImage(const Image *image,const double radius,
ExceptionInfo *exception)
{
Image
*mode_image;
mode_image=StatisticImage(image,ModeStatistic,(size_t) radius,(size_t) radius,
exception);
return(mode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o s a i c I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MosaicImages() is an obsolete function; use MergeImageLayers() instead.
%
% Deprecated, replace with:
%
% MergeImageLayers(image,MosaicLayer,exception);
%
% The format of the MosaicImages method is:
%
% Image *MosaicImages(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image list to be composited together
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MosaicImages(Image *image,ExceptionInfo *exception)
{
return(MergeImageLayers(image,MosaicLayer,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% The format of the OpaqueImage method is:
%
% MagickBooleanType OpaqueImage(Image *image,
% const PixelPacket target,const PixelPacket fill)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
*/
MagickExport MagickBooleanType OpaqueImage(Image *image,
const PixelPacket target,const PixelPacket fill)
{
#define OpaqueImageTag "Opaque/Image"
MagickBooleanType
proceed;
register ssize_t
i;
ssize_t
y;
/*
Make image color opaque.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.1.0");
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Make DirectClass image opaque.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*magick_restrict q;
q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsColorSimilar(image,q,&target) != MagickFalse)
*q=fill;
q++;
}
if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
break;
proceed=SetImageProgress(image,OpaqueImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
break;
}
case PseudoClass:
{
/*
Make PseudoClass image opaque.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
if (IsColorSimilar(image,&image->colormap[i],&target) != MagickFalse)
image->colormap[i]=fill;
}
if (fill.opacity != OpaqueOpacity)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*magick_restrict q;
q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsColorSimilar(image,q,&target) != MagickFalse)
q->opacity=fill.opacity;
q++;
}
if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
break;
}
}
(void) SyncImage(image);
break;
}
}
if (fill.opacity != OpaqueOpacity)
image->matte=MagickTrue;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p e n C a c h e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenCacheView() opens a view into the pixel cache, using the
% VirtualPixelMethod that is defined within the given image itself.
%
% Deprecated, replace with:
%
% AcquireVirtualCacheView(image,&image->exception);
%
% The format of the OpenCacheView method is:
%
% CacheView *OpenCacheView(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheView *OpenCacheView(const Image *image)
{
return(AcquireVirtualCacheView(image,&((Image *) image)->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p e n M a g i c k S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenMagickStream() opens the file at the specified path and returns the
% associated stream.
%
% The format of the OpenMagickStream method is:
%
% FILE *OpenMagickStream(const char *path,const char *mode)
%
% A description of each parameter follows.
%
% o path: the file path.
%
% o mode: the file mode.
%
*/
#if defined(MAGICKCORE_HAVE__WFOPEN)
static size_t UTF8ToUTF16(const unsigned char *utf8,wchar_t *utf16)
{
register const unsigned char
*p;
if (utf16 != (wchar_t *) NULL)
{
register wchar_t
*q;
wchar_t
c;
/*
Convert UTF-8 to UTF-16.
*/
q=utf16;
for (p=utf8; *p != '\0'; p++)
{
if ((*p & 0x80) == 0)
*q=(*p);
else
if ((*p & 0xE0) == 0xC0)
{
c=(*p);
*q=(c & 0x1F) << 6;
p++;
if ((*p & 0xC0) != 0x80)
return(0);
*q|=(*p & 0x3F);
}
else
if ((*p & 0xF0) == 0xE0)
{
c=(*p);
*q=c << 12;
p++;
if ((*p & 0xC0) != 0x80)
return(0);
c=(*p);
*q|=(c & 0x3F) << 6;
p++;
if ((*p & 0xC0) != 0x80)
return(0);
*q|=(*p & 0x3F);
}
else
return(0);
q++;
}
*q++='\0';
return(q-utf16);
}
/*
Compute UTF-16 string length.
*/
for (p=utf8; *p != '\0'; p++)
{
if ((*p & 0x80) == 0)
;
else
if ((*p & 0xE0) == 0xC0)
{
p++;
if ((*p & 0xC0) != 0x80)
return(0);
}
else
if ((*p & 0xF0) == 0xE0)
{
p++;
if ((*p & 0xC0) != 0x80)
return(0);
p++;
if ((*p & 0xC0) != 0x80)
return(0);
}
else
return(0);
}
return(p-utf8);
}
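/*
A worked example of the decoding above: the two-byte UTF-8 sequence
0xC3 0xA9 (U+00E9, 'e' with acute accent) matches the
(*p & 0xE0) == 0xC0 branch and is reassembled as

((0xC3 & 0x1F) << 6) | (0xA9 & 0x3F) == 0x0C0 | 0x029 == 0x0E9

Note the helper handles only 1-, 2-, and 3-byte sequences; a 4-byte
sequence makes it return 0, in which case ConvertUTF8ToUTF16() below falls
back to a byte-for-byte copy.
*/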
static wchar_t *ConvertUTF8ToUTF16(const unsigned char *source)
{
size_t
length;
wchar_t
*utf16;
length=UTF8ToUTF16(source,(wchar_t *) NULL);
if (length == 0)
{
register ssize_t
i;
/*
Not UTF-8, just copy.
*/
length=strlen((const char *) source);
utf16=(wchar_t *) AcquireQuantumMemory(length+1,sizeof(*utf16));
if (utf16 == (wchar_t *) NULL)
return((wchar_t *) NULL);
for (i=0; i <= (ssize_t) length; i++)
utf16[i]=source[i];
return(utf16);
}
utf16=(wchar_t *) AcquireQuantumMemory(length+1,sizeof(*utf16));
if (utf16 == (wchar_t *) NULL)
return((wchar_t *) NULL);
length=UTF8ToUTF16(source,utf16);
return(utf16);
}
#endif
MagickExport FILE *OpenMagickStream(const char *path,const char *mode)
{
FILE
*file;
if ((path == (const char *) NULL) || (mode == (const char *) NULL))
{
errno=EINVAL;
return((FILE *) NULL);
}
file=(FILE *) NULL;
#if defined(MAGICKCORE_HAVE__WFOPEN)
{
wchar_t
*unicode_mode,
*unicode_path;
unicode_path=ConvertUTF8ToUTF16((const unsigned char *) path);
if (unicode_path == (wchar_t *) NULL)
return((FILE *) NULL);
unicode_mode=ConvertUTF8ToUTF16((const unsigned char *) mode);
if (unicode_mode == (wchar_t *) NULL)
{
unicode_path=(wchar_t *) RelinquishMagickMemory(unicode_path);
return((FILE *) NULL);
}
file=_wfopen(unicode_path,unicode_mode);
unicode_mode=(wchar_t *) RelinquishMagickMemory(unicode_mode);
unicode_path=(wchar_t *) RelinquishMagickMemory(unicode_path);
}
#endif
if (file == (FILE *) NULL)
file=fopen(path,mode);
return(file);
}
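/*
Usage sketch: OpenMagickStream() behaves like fopen(), except that on
builds defining MAGICKCORE_HAVE__WFOPEN the UTF-8 path is first converted
to UTF-16 so non-ASCII filenames work on Windows (the filename here is
hypothetical):

FILE
*file;

file=OpenMagickStream("image.png","rb");
if (file != (FILE *) NULL)
{
...
(void) fclose(file);
}
*/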
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P a i n t F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PaintFloodfillImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly.
% However, in many cases two colors may differ by a small amount. The
% fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now
% interpreted as the same color for the purposes of the floodfill.
%
% Deprecated, replace with:
%
% FloodfillPaintImage(image,channel,draw_info,target,x,y,
% method == FloodfillMethod ? MagickFalse : MagickTrue);
%
% The format of the PaintFloodfillImage method is:
%
% MagickBooleanType PaintFloodfillImage(Image *image,
% const ChannelType channel,const MagickPixelPacket *target,
% const ssize_t x,const ssize_t y,const DrawInfo *draw_info,
% const PaintMethod method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel(s).
%
% o target: the RGB value of the target color.
%
% o x,y: the starting location of the operation.
%
% o draw_info: the draw info.
%
% o method: Choose either FloodfillMethod or FillToBorderMethod.
%
*/
MagickExport MagickBooleanType PaintFloodfillImage(Image *image,
const ChannelType channel,const MagickPixelPacket *target,const ssize_t x,
const ssize_t y,const DrawInfo *draw_info,const PaintMethod method)
{
MagickBooleanType
status;
status=FloodfillPaintImage(image,channel,draw_info,target,x,y,
method == FloodfillMethod ? MagickFalse : MagickTrue);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P a i n t O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PaintOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% Deprecated, replace with:
%
% OpaquePaintImageChannel(image,DefaultChannels,target,fill,MagickFalse);
% OpaquePaintImageChannel(image,channel,target,fill,MagickFalse);
%
% The format of the PaintOpaqueImage method is:
%
% MagickBooleanType PaintOpaqueImage(Image *image,
% const MagickPixelPacket *target,const MagickPixelPacket *fill)
% MagickBooleanType PaintOpaqueImageChannel(Image *image,
% const ChannelType channel,const MagickPixelPacket *target,
% const MagickPixelPacket *fill)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel(s).
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
*/
MagickExport MagickBooleanType PaintOpaqueImage(Image *image,
const MagickPixelPacket *target,const MagickPixelPacket *fill)
{
MagickBooleanType
status;
status=OpaquePaintImageChannel(image,DefaultChannels,target,fill,MagickFalse);
return(status);
}
MagickExport MagickBooleanType PaintOpaqueImageChannel(Image *image,
const ChannelType channel,const MagickPixelPacket *target,
const MagickPixelPacket *fill)
{
return(OpaquePaintImageChannel(image,channel,target,fill,MagickFalse));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P a i n t T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PaintTransparentImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% Deprecated, replace with:
%
% TransparentPaintImage(image,target,opacity,MagickFalse);
%
% The format of the PaintTransparentImage method is:
%
% MagickBooleanType PaintTransparentImage(Image *image,
% const MagickPixelPacket *target,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o opacity: the replacement opacity value.
%
*/
MagickExport MagickBooleanType PaintTransparentImage(Image *image,
const MagickPixelPacket *target,const Quantum opacity)
{
return(TransparentPaintImage(image,target,opacity,MagickFalse));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P a r s e I m a g e G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ParseImageGeometry() is similar to GetGeometry() except the returned
% geometry is modified as determined by the meta characters: %, !, <,
% and >.
%
% Deprecated, replace with:
%
% ParseMetaGeometry(geometry,x,y,width,height);
%
% The format of the ParseImageGeometry method is:
%
% int ParseImageGeometry(const char *geometry,ssize_t *x,ssize_t *y,
% size_t *width,size_t *height)
%
% A description of each parameter follows:
%
% o flags: Method ParseImageGeometry returns a bitmask that indicates
% which of the four values were located in the geometry string.
%
% o geometry: Specifies a character string representing the geometry
% specification.
%
% o x,y: A pointer to an integer. The x and y offset as determined by
% the geometry specification is returned here.
%
% o width,height: A pointer to an unsigned integer. The width and height
% as determined by the geometry specification is returned here.
%
*/
MagickExport int ParseImageGeometry(const char *geometry,ssize_t *x,ssize_t *y,
size_t *width,size_t *height)
{
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
return((int) ParseMetaGeometry(geometry,x,y,width,height));
}
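/*
For example (hypothetical geometry string), parsing "100x200+10+20" sets
width=100, height=200, x=10, and y=20, and the returned bitmask has the
WidthValue, HeightValue, XValue, and YValue flags set; meta characters such
as '%' and '!' modify the returned geometry as described above.
*/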
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P a r s e S i z e G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ParseSizeGeometry() returns a region as defined by the geometry string with
% respect to the image dimensions and aspect ratio.
%
% Deprecated, replace with:
%
% ParseMetaGeometry(geometry,®ion_info->x,®ion_info->y,
% ®ion_info->width,®ion_info->height);
%
% The format of the ParseSizeGeometry method is:
%
% MagickStatusType ParseSizeGeometry(const Image *image,
% const char *geometry,RectangleInfo *region_info)
%
% A description of each parameter follows:
%
% o geometry: The geometry (e.g. 100x100+10+10).
%
% o region_info: the region as defined by the geometry string.
%
*/
MagickExport MagickStatusType ParseSizeGeometry(const Image *image,
const char *geometry,RectangleInfo *region_info)
{
MagickStatusType
flags;
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.4.7");
SetGeometry(image,region_info);
flags=ParseMetaGeometry(geometry,®ion_info->x,®ion_info->y,
®ion_info->width,®ion_info->height);
return(flags);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o p I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PopImageList() removes the last image in the list.
%
% Deprecated, replace with:
%
% RemoveLastImageFromList(images);
%
% The format of the PopImageList method is:
%
% Image *PopImageList(Image **images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport Image *PopImageList(Image **images)
{
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
return(RemoveLastImageFromList(images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o p I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PopImagePixels() transfers one or more pixel components from the image pixel
% cache to a user supplied buffer. The pixels are returned in network byte
% order. The length of the transferred pixel data is returned, or 0 if the
% transfer fails.
%
% The format of the PopImagePixels method is:
%
% size_t PopImagePixels(Image *image,const QuantumType quantum,
% unsigned char *destination)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o quantum: Declare which pixel components to transfer (RGB, RGBA, etc).
%
% o destination: The components are transferred to this buffer.
%
*/
MagickExport size_t PopImagePixels(Image *image,const QuantumType quantum,
unsigned char *destination)
{
QuantumInfo
*quantum_info;
size_t
length;
quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image);
if (quantum_info == (QuantumInfo *) NULL)
return(0);
length=ExportQuantumPixels(image,(const CacheView *) NULL,quantum_info,
quantum,destination,&image->exception);
quantum_info=DestroyQuantumInfo(quantum_info);
return(length);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t s c r i p t G e o m e t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PostscriptGeometry() replaces any page mnemonic with the equivalent size in
% picas.
%
% Deprecated, replace with:
%
% GetPageGeometry(page);
%
% The format of the PostscriptGeometry method is:
%
% char *PostscriptGeometry(const char *page)
%
% A description of each parameter follows.
%
% o page: Specifies a pointer to an array of characters.
% The string is either a Postscript page name (e.g. A4) or a postscript
% page geometry (e.g. 612x792+36+36).
%
*/
MagickExport char *PostscriptGeometry(const char *page)
{
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
return(GetPageGeometry(page));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P u s h I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PushImageList() adds an image to the end of the list.
%
% Deprecated, replace with:
%
% AppendImageToList(images,CloneImageList(image,exception));
%
% The format of the PushImageList method is:
%
% unsigned int PushImageList(Image **images,const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int PushImageList(Image **images,const Image *image,
ExceptionInfo *exception)
{
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
AppendImageToList(images,CloneImageList(image,exception));
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P u s h I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PushImagePixels() transfers one or more pixel components from a user
% supplied buffer into the image pixel cache of an image. The pixels are
% expected in network byte order. The length of the transferred pixel data is
% returned, or 0 if the transfer fails.
%
% The format of the PushImagePixels method is:
%
% size_t PushImagePixels(Image *image,const QuantumType quantum,
% const unsigned char *source)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o quantum: Declare which pixel components to transfer (red, green, blue,
% opacity, RGB, or RGBA).
%
% o source: The pixel components are transferred from this buffer.
%
*/
MagickExport size_t PushImagePixels(Image *image,const QuantumType quantum,
const unsigned char *source)
{
QuantumInfo
*quantum_info;
size_t
length;
quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image);
if (quantum_info == (QuantumInfo *) NULL)
return(0);
length=ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,quantum,
source,&image->exception);
quantum_info=DestroyQuantumInfo(quantum_info);
return(length);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z a t i o n E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizationError() measures the difference between the original and
% quantized images. This difference is the total quantization error. The
% error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% Deprecated, replace with:
%
% GetImageQuantizeError(image);
%
% The format of the QuantizationError method is:
%
% unsigned int QuantizationError(Image *image)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
*/
MagickExport unsigned int QuantizationError(Image *image)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.3");
return(GetImageQuantizeError(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a d i a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RadialBlurImage() applies a radial blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RadialBlurImage method is:
%
% Image *RadialBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
% Image *RadialBlurImageChannel(const Image *image,const ChannelType channel,
% const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o angle: the angle of the radial blur.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RadialBlurImage(const Image *image,const double angle,
ExceptionInfo *exception)
{
return(RotationalBlurImage(image,angle,exception));
}
MagickExport Image *RadialBlurImageChannel(const Image *image,
const ChannelType channel,const double angle,ExceptionInfo *exception)
{
return(RotationalBlurImageChannel(image,channel,angle,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n d o m C h a n n e l T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomChannelThresholdImage() changes the value of individual pixels based
% on the intensity of each pixel compared to a random threshold. The result
% is a low-contrast, two color image.
%
% The format of the RandomChannelThresholdImage method is:
%
% unsigned int RandomChannelThresholdImage(Image *image,
% const char *channel, const char *thresholds,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o thresholds: a geometry string containing LOWxHIGH thresholds.
% If the string contains 2x2, 3x3, or 4x4, then an ordered
% dither of order 2, 3, or 4 will be performed instead.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int RandomChannelThresholdImage(Image *image,
const char *channel,const char *thresholds,ExceptionInfo *exception)
{
#define RandomChannelThresholdImageText " RandomChannelThreshold image... "
double
lower_threshold,
upper_threshold;
RandomInfo
*random_info;
ssize_t
count,
y;
static MagickRealType
o2[4]={0.2f, 0.6f, 0.8f, 0.4f},
o3[9]={0.1f, 0.6f, 0.3f, 0.7f, 0.5f, 0.8f, 0.4f, 0.9f, 0.2f},
o4[16]={0.1f, 0.7f, 1.1f, 0.3f, 1.0f, 0.5f, 1.5f, 0.8f, 1.4f, 1.6f, 0.6f,
1.2f, 0.4f, 0.9f, 1.3f, 0.2f},
threshold=128;
size_t
order;
/*
Threshold image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
if (thresholds == (const char *) NULL)
return(MagickTrue);
lower_threshold=0;
upper_threshold=0;
if (LocaleCompare(thresholds,"2x2") == 0)
order=2;
else
if (LocaleCompare(thresholds,"3x3") == 0)
order=3;
else
if (LocaleCompare(thresholds,"4x4") == 0)
order=4;
else
{
order=1;
count=(ssize_t) sscanf(thresholds,"%lf[/x%%]%lf",&lower_threshold,
&upper_threshold);
if (strchr(thresholds,'%') != (char *) NULL)
{
upper_threshold*=(.01*QuantumRange);
lower_threshold*=(.01*QuantumRange);
}
if (count == 1)
upper_threshold=(MagickRealType) QuantumRange-lower_threshold;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" RandomChannelThresholdImage: channel type=%s",channel);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" Thresholds: %s (%fx%f)",thresholds,lower_threshold,upper_threshold);
if (LocaleCompare(channel,"all") == 0 ||
LocaleCompare(channel,"intensity") == 0)
if (AcquireImageColormap(image,2) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
random_info=AcquireRandomInfo();
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register IndexPacket
index,
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
if (LocaleCompare(channel,"all") == 0 ||
LocaleCompare(channel,"intensity") == 0)
{
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
intensity;
intensity=GetPixelIntensity(image,q);
if (order == 1)
{
if (intensity < lower_threshold)
threshold=lower_threshold;
else if (intensity > upper_threshold)
threshold=upper_threshold;
else
threshold=(MagickRealType) (QuantumRange*
GetPseudoRandomValue(random_info));
}
else if (order == 2)
threshold=(MagickRealType) QuantumRange*o2[(x%2)+2*(y%2)];
else if (order == 3)
threshold=(MagickRealType) QuantumRange*o3[(x%3)+3*(y%3)];
else if (order == 4)
threshold=(MagickRealType) QuantumRange*o4[(x%4)+4*(y%4)];
index=(IndexPacket) (intensity <= threshold ? 0 : 1);
SetPixelIndex(indexes+x,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
}
}
if (LocaleCompare(channel,"opacity") == 0 ||
LocaleCompare(channel,"all") == 0 ||
LocaleCompare(channel,"matte") == 0)
{
if (image->matte != MagickFalse)
for (x=0; x < (ssize_t) image->columns; x++)
{
if (order == 1)
{
if ((MagickRealType) q->opacity < lower_threshold)
threshold=lower_threshold;
else if ((MagickRealType) q->opacity > upper_threshold)
threshold=upper_threshold;
else
threshold=(MagickRealType) (QuantumRange*
GetPseudoRandomValue(random_info));
}
else if (order == 2)
threshold=(MagickRealType) QuantumRange*o2[(x%2)+2*(y%2)];
else if (order == 3)
threshold=(MagickRealType) QuantumRange*o3[(x%3)+3*(y%3)];
else if (order == 4)
threshold=(MagickRealType) QuantumRange*o4[(x%4)+4*(y%4)]/1.7;
SetPixelOpacity(q,(MagickRealType) q->opacity <=
threshold ? 0 : QuantumRange);
q++;
}
}
else
{
/* To Do: red, green, blue, cyan, magenta, yellow, black */
if (LocaleCompare(channel,"intensity") != 0)
ThrowBinaryException(OptionError,"UnrecognizedChannelType",
image->filename);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
random_info=DestroyRandomInfo(random_info);
return(MagickTrue);
}
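/*
A worked example of the ordered-dither path above: with thresholds "2x2"
(order == 2), the pixel at (x,y) == (1,0) uses o2[(1%2)+2*(0%2)] == o2[1]
== 0.6, so its threshold is 0.6*QuantumRange; an intensity at or below the
threshold maps to colormap entry 0, anything brighter to entry 1.
*/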
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a c q u i r e M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReacquireMemory() changes the size of the memory and returns a pointer to
% the (possibly moved) block. The contents will be unchanged up to the
% lesser of the new and old sizes.
%
% The format of the ReacquireMemory method is:
%
% void ReacquireMemory(void **memory,const size_t size)
%
% A description of each parameter follows:
%
% o memory: A pointer to a memory allocation. On return the pointer
% may change but the contents of the original allocation will not.
%
% o size: the new size of the allocated memory.
%
*/
MagickExport void ReacquireMemory(void **memory,const size_t size)
{
void
*allocation;
assert(memory != (void **) NULL);
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
if (*memory == (void *) NULL)
{
*memory=AcquireMagickMemory(size);
return;
}
allocation=realloc(*memory,size);
if (allocation == (void *) NULL)
*memory=RelinquishMagickMemory(*memory);
*memory=allocation;
}
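/*
Usage sketch: grow a buffer in place.  Note the failure semantics above: if
realloc() fails, the original block is relinquished and *memory is set to
NULL, so the pointer must be re-checked after every call:

void
*buffer;

buffer=AcquireMagickMemory(256);
ReacquireMemory(&buffer,1024);
if (buffer == (void *) NULL)
return;
*/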
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e c o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RecolorImage() applies a color transformation to an image. The method permits
% saturation changes, hue rotation, luminance to alpha, and various other
% effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the RecolorImage method is:
%
% Image *RecolorImage(const Image *image,const size_t order,
% const double *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o order: the number of columns and rows in the recolor matrix.
%
% o color_matrix: An array of double representing the recolor matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RecolorImage(const Image *image,const size_t order,
const double *color_matrix,ExceptionInfo *exception)
{
KernelInfo
*kernel_info;
Image
*recolor_image;
kernel_info=AcquireKernelInfo("1");
if (kernel_info == (KernelInfo *) NULL)
return((Image *) NULL);
kernel_info->width=order;
kernel_info->height=order;
kernel_info->values=(double *) color_matrix;
recolor_image=ColorMatrixImage(image,kernel_info,exception);
kernel_info->values=(double *) NULL;
kernel_info=DestroyKernelInfo(kernel_info);
return(recolor_image);
}
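/*
A minimal sketch (hypothetical matrix values): an order-5 identity matrix
leaves an RGBA image unchanged, and scaling a diagonal entry scales the
corresponding channel; here 0.5 in the first row halves the red channel:

static const double
matrix[25] =
{
0.5, 0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 0.0, 1.0
};

recolor_image=RecolorImage(image,5,matrix,&image->exception);
*/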
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e d u c e N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceNoiseImage() smooths the contours of an image while still preserving
% edge information. The algorithm works by replacing each pixel with its
% neighbor closest in value. A neighbor is defined by radius. Use a radius
% of 0 and ReduceNoiseImage() selects a suitable radius for you.
%
% The format of the ReduceNoiseImage method is:
%
% Image *ReduceNoiseImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReduceNoiseImage(const Image *image,const double radius,
ExceptionInfo *exception)
{
Image
*reduce_image;
reduce_image=StatisticImage(image,NonpeakStatistic,(size_t) radius,(size_t)
radius,exception);
return(reduce_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e l i n q u i s h S e m a p h o r e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishSemaphoreInfo() relinquishes a semaphore.
%
% The format of the RelinquishSemaphoreInfo method is:
%
% void RelinquishSemaphoreInfo(SemaphoreInfo *semaphore_info)
%
% A description of each parameter follows:
%
% o semaphore_info: Specifies a pointer to an SemaphoreInfo structure.
%
*/
MagickExport void RelinquishSemaphoreInfo(SemaphoreInfo *semaphore_info)
{
assert(semaphore_info != (SemaphoreInfo *) NULL);
UnlockSemaphoreInfo(semaphore_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e A t t r i b u t e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageAttributeIterator() resets the image attributes iterator. Use it
% in conjunction with GetNextImageAttribute() to iterate over all the values
% associated with an image.
%
% Deprecated, replace with:
%
% ResetImagePropertyIterator(image);
%
% The format of the ResetImageAttributeIterator method is:
%
% void ResetImageAttributeIterator(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void ResetImageAttributeIterator(const Image *image)
{
ResetImagePropertyIterator(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t C a c h e V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetCacheViewPixels() gets pixels from the in-memory or disk pixel cache as
% defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% Deprecated, replace with:
%
% QueueCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
% GetCacheViewException(cache_view));
%
% The format of the SetCacheViewPixels method is:
%
% PixelPacket *SetCacheViewPixels(CacheView *cache_view,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *SetCacheViewPixels(CacheView *cache_view,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows)
{
PixelPacket
*pixels;
pixels=QueueCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
GetCacheViewException(cache_view));
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t C a c h e T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetCacheThreshold() sets the amount of free memory allocated for the pixel
% cache. Once this threshold is exceeded, all subsequent pixel cache
% operations are to/from disk.
%
% The format of the SetCacheThreshold() method is:
%
% void SetCacheThreshold(const size_t threshold)
%
% A description of each parameter follows:
%
% o threshold: the number of megabytes of memory available to the pixel
% cache.
%
*/
MagickExport void SetCacheThreshold(const size_t threshold)
{
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
(void) SetMagickResourceLimit(MemoryResource,threshold*1024*1024);
(void) SetMagickResourceLimit(MapResource,2*threshold*1024*1024);
}
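/*
For example, SetCacheThreshold(1024) caps the pixel-cache memory resource
at 1024 megabytes (1024*1024*1024 bytes) and the memory-map resource at
twice that, per the resource limits set above.
*/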
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t E x c e p t i o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetExceptionInfo() sets the exception severity.
%
% The format of the SetExceptionInfo method is:
%
% MagickBooleanType SetExceptionInfo(ExceptionInfo *exception,
% ExceptionType severity)
%
% A description of each parameter follows:
%
% o exception: the exception info.
%
% o severity: the exception severity.
%
*/
MagickExport MagickBooleanType SetExceptionInfo(ExceptionInfo *exception,
ExceptionType severity)
{
assert(exception != (ExceptionInfo *) NULL);
ClearMagickException(exception);
exception->severity=severity;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImage() sets the red, green, and blue components of each pixel to
% the image background color and the opacity component to the specified
% level of transparency. The background color is defined by the
% background_color member of the image.
%
% The format of the SetImage method is:
%
% void SetImage(Image *image,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: Set each pixel to this level of transparency.
%
*/
MagickExport void SetImage(Image *image,const Quantum opacity)
{
PixelPacket
background_color;
ssize_t
y;
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.0");
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
background_color=image->background_color;
if (opacity != OpaqueOpacity)
background_color.opacity=opacity;
if (background_color.opacity != OpaqueOpacity)
{
(void) SetImageStorageClass(image,DirectClass);
image->matte=MagickTrue;
}
if ((image->storage_class == PseudoClass) ||
(image->colorspace == CMYKColorspace))
{
/*
Set colormapped or CMYK image.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
q=QueueAuthenticPixels(image,0,y,image->columns,1,&image->exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRGBO(q,&background_color);
q++;
}
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(indexes+x,0);
if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
break;
}
return;
}
/*
Set DirectClass image.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*magick_restrict q;
q=QueueAuthenticPixels(image,0,y,image->columns,1,&image->exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRGBO(q,&background_color);
q++;
}
if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
break;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAttribute() searches the list of image attributes and replaces the
% attribute value.  If it is not found in the list, the attribute name
% and value are added to the list.
%
% Deprecated, replace with:
%
% SetImageProperty(image,key,value);
%
% The format of the SetImageAttribute method is:
%
% MagickBooleanType SetImageAttribute(Image *image,const char *key,
% const char *value)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o key: the key.
%
% o value: the value.
%
*/
MagickExport MagickBooleanType SetImageAttribute(Image *image,const char *key,
const char *value)
{
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1");
return(SetImageProperty(image,key,value));
}
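/*
  Migration sketch (the key/value pair is hypothetical; any valid property
  name works): the deprecated call and its replacement take identical
  arguments, so

    (void) SetImageAttribute(image,"comment","sunset at the beach");

  becomes

    (void) SetImageProperty(image,"comment","sunset at the beach");
*/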
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageList() inserts an image into the list at the specified position.
%
% The format of the SetImageList method is:
%
%      unsigned int SetImageList(Image **images,const Image *image,
% const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o image: the image.
%
% o offset: the position within the list.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int SetImageList(Image **images,const Image *image,
const ssize_t offset,ExceptionInfo *exception)
{
Image
*clone;
register ssize_t
i;
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
clone=CloneImageList(image,exception);
while (GetPreviousImageInList(*images) != (Image *) NULL)
(*images)=GetPreviousImageInList(*images);
for (i=0; i < offset; i++)
{
if (GetNextImageInList(*images) == (Image *) NULL)
return(MagickFalse);
(*images)=GetNextImageInList(*images);
}
InsertImageInList(images,clone);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImagePixels() queues a mutable pixel region.
% If the region is successfully initialized a pointer to a PixelPacket
% array representing the region is returned, otherwise NULL is returned.
% The returned pointer may point to a temporary working buffer for the
% pixels or it may point to the final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored.  This is useful while the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% SetImagePixels() any way it pleases. SetImagePixels() does not initialize
% the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in RAM, or in a
% memory-mapped file. The returned pointer should *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking SetImagePixels() to obtain
% the black color component or the colormap indexes (of type IndexPacket)
% corresponding to the region. Once the PixelPacket (and/or IndexPacket)
% array has been updated, the changes must be saved back to the underlying
% image using SyncAuthenticPixels() or they may be lost.
%
% Deprecated, replace with:
%
% QueueAuthenticPixels(image,x,y,columns,rows,&image->exception);
%
% The format of the SetImagePixels() method is:
%
% PixelPacket *SetImagePixels(Image *image,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o pixels: SetImagePixels returns a pointer to the pixels if they are
% transferred, otherwise a NULL is returned.
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
*/
MagickExport PixelPacket *SetImagePixels(Image *image,const ssize_t x,const ssize_t y,
const size_t columns,const size_t rows)
{
return(QueueAuthenticPixels(image,x,y,columns,rows,&image->exception));
}
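/*
  Workflow sketch (hypothetical caller; image, y, x, and fill_color are
  assumed to exist; error handling abridged): queue a row, initialize every
  pixel (SetImagePixels does not do this for you), then sync the changes
  back, exactly as the paragraphs above describe:

    PixelPacket *q = SetImagePixels(image,0,y,image->columns,1);
    if (q != (PixelPacket *) NULL)
      {
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelRGBO(q+x,&fill_color);
        (void) SyncAuthenticPixels(image,&image->exception);
      }
*/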
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a g i c k R e g i s t r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMagickRegistry() sets a blob into the registry and returns a unique ID.
% If an error occurs, -1 is returned.
%
% The format of the SetMagickRegistry method is:
%
% ssize_t SetMagickRegistry(const RegistryType type,const void *blob,
% const size_t length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o type: the registry type.
%
% o blob: the address of a Binary Large OBject.
%
% o length: For a registry type of ImageRegistryType use sizeof(Image)
%      otherwise the blob length in number of bytes.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ssize_t SetMagickRegistry(const RegistryType type,const void *blob,
const size_t magick_unused(length),ExceptionInfo *exception)
{
char
key[MaxTextExtent];
MagickBooleanType
status;
static ssize_t
id = 0;
magick_unreferenced(length);
(void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id);
status=SetImageRegistry(type,key,blob,exception);
if (status == MagickFalse)
return(-1);
return(id++);
}
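/*
  Key-generation note: the registry key is simply the static id counter
  formatted as text with "%.20g\n", so successive successful calls store
  blobs under the keys "0\n", "1\n", "2\n", ... and return ids 0, 1, 2, ...
*/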
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M o n i t o r H a n d l e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMonitorHandler() sets the monitor handler to the specified method
% and returns the previous monitor handler.
%
% The format of the SetMonitorHandler method is:
%
% MonitorHandler SetMonitorHandler(MonitorHandler handler)
%
% A description of each parameter follows:
%
% o handler: Specifies a pointer to a method to handle monitors.
%
*/
MagickExport MonitorHandler GetMonitorHandler(void)
{
return(monitor_handler);
}
MagickExport MonitorHandler SetMonitorHandler(MonitorHandler handler)
{
MonitorHandler
previous_handler;
previous_handler=monitor_handler;
monitor_handler=handler;
return(previous_handler);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h i f t I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShiftImageList() removes an image from the beginning of the list.
%
% Deprecated, replace with:
%
% RemoveFirstImageFromList(images);
%
% The format of the ShiftImageList method is:
%
% Image *ShiftImageList(Image **images)
%
% A description of each parameter follows:
%
% o images: the image list.
%
*/
MagickExport Image *ShiftImageList(Image **images)
{
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
return(RemoveFirstImageFromList(images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S i z e B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SizeBlob() returns the current length of the image file or blob.
%
% Deprecated, replace with:
%
% GetBlobSize(image);
%
% The format of the SizeBlob method is:
%
%      MagickOffsetType SizeBlob(Image *image)
%
% A description of each parameter follows:
%
% o size: Method SizeBlob returns the current length of the image file
% or blob.
%
% o image: the image.
%
*/
MagickExport MagickOffsetType SizeBlob(Image *image)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.3");
return((MagickOffsetType) GetBlobSize(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImageList() removes the images designated by offset and length from
% the list and replaces them with the specified list.
%
% The format of the SpliceImageList method is:
%
% Image *SpliceImageList(Image *images,const ssize_t offset,
% const size_t length,const Image *splices,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o offset: the position within the list.
%
% o length: the length of the image list to remove.
%
%    o splices: Replace the removed image list with this list.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImageList(Image *images,const ssize_t offset,
const size_t length,const Image *splices,ExceptionInfo *exception)
{
Image
*clone;
register ssize_t
i;
if (images->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
clone=CloneImageList(splices,exception);
while (GetPreviousImageInList(images) != (Image *) NULL)
images=GetPreviousImageInList(images);
for (i=0; i < offset; i++)
{
if (GetNextImageInList(images) == (Image *) NULL)
return((Image *) NULL);
images=GetNextImageInList(images);
}
(void) SpliceImageIntoList(&images,length,clone);
return(images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% s R G B C o m p a n d o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% sRGBCompandor() applies the sRGB gamma (companding) function to a pixel.
%
% The format of the sRGBCompandor method is:
%
% MagickRealType sRGBCompandor(const MagickRealType pixel)
%
% A description of each parameter follows:
%
% o pixel: the pixel.
%
*/
MagickExport MagickRealType sRGBCompandor(const MagickRealType pixel)
{
if (pixel <= (0.0031306684425005883*QuantumRange))
return(12.92*pixel);
return(QuantumRange*(1.055*pow(QuantumScale*pixel,1.0/2.4)-0.055));
}
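/*
  The body above is the standard sRGB encoding (companding) curve, scaled to
  [0..QuantumRange].  With x = QuantumScale*pixel in [0..1]:

    f(x) = 12.92*x                     if x <= 0.0031306684...
    f(x) = 1.055*x^(1/2.4) - 0.055     otherwise

  For example, x = 0.5 gives 1.055*0.5^(1/2.4)-0.055, roughly
  0.7354*QuantumRange.
*/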
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Strip() strips any whitespace or quotes from the beginning and end of a
% string of characters.
%
% The format of the Strip method is:
%
% void Strip(char *message)
%
% A description of each parameter follows:
%
% o message: Specifies an array of characters.
%
*/
MagickExport void Strip(char *message)
{
register char
*p,
*q;
assert(message != (char *) NULL);
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
if (*message == '\0')
return;
if (strlen(message) == 1)
return;
p=message;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if ((*p == '\'') || (*p == '"'))
p++;
q=message+strlen(message)-1;
while ((isspace((int) ((unsigned char) *q)) != 0) && (q > p))
q--;
if (q > p)
if ((*q == '\'') || (*q == '"'))
q--;
(void) memcpy(message,p,(size_t) (q-p+1));
message[q-p+1]='\0';
}
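/*
  Behavior sketch (input value hypothetical): Strip() trims whitespace and
  at most one quote from each end, in place:

    char text[] = "  'Hello, World'  ";
    Strip(text);

  after which text contains: Hello, World
*/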
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c C a c h e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncCacheView() saves the cache view pixels to the in-memory or disk
% cache. It returns MagickTrue if the pixel region is synced, otherwise
% MagickFalse.
%
% Deprecated, replace with:
%
% SyncCacheViewAuthenticPixels(cache_view,GetCacheViewException(cache_view));
%
% The format of the SyncCacheView method is:
%
% MagickBooleanType SyncCacheView(CacheView *cache_view)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
*/
MagickExport MagickBooleanType SyncCacheView(CacheView *cache_view)
{
MagickBooleanType
status;
status=SyncCacheViewAuthenticPixels(cache_view,
GetCacheViewException(cache_view));
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c C a c h e V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncCacheViewPixels() saves the cache view pixels to the in-memory
% or disk cache. It returns MagickTrue if the pixel region is flushed,
% otherwise MagickFalse.
%
% Deprecated, replace with:
%
% SyncCacheViewAuthenticPixels(cache_view,GetCacheViewException(cache_view));
%
% The format of the SyncCacheViewPixels method is:
%
% MagickBooleanType SyncCacheViewPixels(CacheView *cache_view)
%
% A description of each parameter follows:
%
% o cache_view: the cache view.
%
*/
MagickExport MagickBooleanType SyncCacheViewPixels(CacheView *cache_view)
{
MagickBooleanType
status;
status=SyncCacheViewAuthenticPixels(cache_view,
GetCacheViewException(cache_view));
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is synced, otherwise
% MagickFalse.
%
% Deprecated, replace with:
%
% SyncAuthenticPixels(image,&image->exception);
%
% The format of the SyncImagePixels() method is:
%
% MagickBooleanType SyncImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SyncImagePixels(Image *image)
{
return(SyncAuthenticPixels(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y s t e m C o m m a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SystemCommand() executes the specified command and waits until it
% terminates. The returned value is the exit status of the command.
%
% The format of the SystemCommand method is:
%
% int SystemCommand(const MagickBooleanType asynchronous,
% const MagickBooleanType verbose,const char *command,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o asynchronous: a value other than 0 executes the parent program
% concurrently with the new child process.
%
% o verbose: a value other than 0 prints the executed command before it is
% invoked.
%
% o command: this string is the command to execute.
%
% o exception: return any errors here.
%
*/
MagickExport int SystemCommand(const MagickBooleanType asynchronous,
const MagickBooleanType verbose,const char *command,ExceptionInfo *exception)
{
int
status;
status=ExternalDelegateCommand(asynchronous,verbose,command,(char *) NULL,
exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e m p o r a r y F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TemporaryFilename() replaces the contents of path by a unique path name.
%
% The format of the TemporaryFilename method is:
%
% void TemporaryFilename(char *path)
%
% A description of each parameter follows.
%
% o path: Specifies a pointer to an array of characters. The unique path
% name is returned in this array.
%
*/
MagickExport void TemporaryFilename(char *path)
{
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6");
(void) AcquireUniqueFilename(path);
(void) RelinquishUniqueFileResource(path);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThresholdImage() changes the value of individual pixels based on
% the intensity of each pixel compared to threshold. The result is a
% high-contrast, two color image.
%
% The format of the ThresholdImage method is:
%
% unsigned int ThresholdImage(Image *image,const double threshold)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o threshold: define the threshold value.
%
*/
MagickExport unsigned int ThresholdImage(Image *image,const double threshold)
{
#define ThresholdImageTag "Threshold/Image"
IndexPacket
index;
ssize_t
y;
/*
Threshold image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
if (!AcquireImageColormap(image,2))
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
"UnableToThresholdImage");
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
index=(IndexPacket) (GetPixelIntensity(image,q) <=
threshold ? 0 : 1);
SetPixelIndex(indexes+x,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
}
if (!SyncAuthenticPixels(image,&image->exception))
break;
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h r e s h o l d I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThresholdImageChannel() changes the value of individual pixels based on
% the intensity of each pixel channel. The result is a high-contrast image.
%
% The format of the ThresholdImageChannel method is:
%
% unsigned int ThresholdImageChannel(Image *image,const char *threshold)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold values.
%
*/
MagickExport unsigned int ThresholdImageChannel(Image *image,
const char *threshold)
{
#define ThresholdImageTag "Threshold/Image"
MagickPixelPacket
pixel;
GeometryInfo
geometry_info;
IndexPacket
index;
ssize_t
y;
unsigned int
flags;
/*
Threshold image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (threshold == (const char *) NULL)
return(MagickTrue);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
GetMagickPixelPacket(image,&pixel);
flags=ParseGeometry(threshold,&geometry_info);
pixel.red=geometry_info.rho;
if (flags & SigmaValue)
pixel.green=geometry_info.sigma;
else
pixel.green=pixel.red;
if (flags & XiValue)
pixel.blue=geometry_info.xi;
else
pixel.blue=pixel.red;
if (flags & PsiValue)
pixel.opacity=geometry_info.psi;
else
pixel.opacity=(MagickRealType) OpaqueOpacity;
if (flags & PercentValue)
{
pixel.red*=QuantumRange/100.0f;
pixel.green*=QuantumRange/100.0f;
pixel.blue*=QuantumRange/100.0f;
pixel.opacity*=QuantumRange/100.0f;
}
if (!(flags & SigmaValue))
{
if (!AcquireImageColormap(image,2))
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
"UnableToThresholdImage");
if (pixel.red == 0)
(void) GetImageDynamicThreshold(image,2.0,2.0,&pixel,&image->exception);
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
if (IsMagickGray(&pixel) != MagickFalse)
for (x=0; x < (ssize_t) image->columns; x++)
{
index=(IndexPacket) (GetPixelIntensity(image,q) <= pixel.red ? 0 : 1);
SetPixelIndex(indexes+x,index);
SetPixelRed(q,image->colormap[(ssize_t) index].red);
SetPixelGreen(q,image->colormap[(ssize_t) index].green);
SetPixelBlue(q,image->colormap[(ssize_t) index].blue);
q++;
}
else
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,(MagickRealType) q->red <= pixel.red
? 0 : QuantumRange);
SetPixelGreen(q,(MagickRealType) q->green <= pixel.green
? 0 : QuantumRange);
SetPixelBlue(q,(MagickRealType) q->blue <= pixel.blue
? 0 : QuantumRange);
SetPixelOpacity(q,(MagickRealType) q->opacity <= pixel.opacity
? 0 : QuantumRange);
q++;
}
if (!SyncAuthenticPixels(image,&image->exception))
break;
}
return(MagickTrue);
}
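/*
  Threshold-string sketch: ParseGeometry() maps rho/sigma/xi/psi onto the
  red/green/blue/opacity thresholds above.  A lone value such as "50%"
  therefore thresholds every channel at QuantumRange/2, and because the
  parsed pixel is gray in that case, the intensity-based two-color colormap
  path is taken.
*/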
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a n s f o r m C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformColorspace() converts the image to a specified colorspace.
% If the image is already in the requested colorspace, no work is performed.
% Note that the current colorspace is stored in the image colorspace member.
% The transformation matrices are not necessarily the standard ones: the
% weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% Deprecated, replace with:
%
% TransformImageColorspace(image,colorspace);
%
% The format of the TransformColorspace method is:
%
%      unsigned int TransformColorspace(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image to transform
%
% o colorspace: the desired colorspace.
%
*/
MagickExport unsigned int TransformColorspace(Image *image,
const ColorspaceType colorspace)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6");
return(TransformImageColorspace(image,colorspace));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m H S L %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformHSL() converts a (red, green, blue) to a (hue, saturation,
% lightness) triple.
%
% The format of the TransformHSL method is:
%
% void TransformHSL(const Quantum red,const Quantum green,
% const Quantum blue,double *hue,double *saturation,double *lightness)
%
% A description of each parameter follows:
%
% o red, green, blue: A Quantum value representing the red, green, and
%      blue component of a pixel.
%
% o hue, saturation, lightness: A pointer to a double value representing a
% component of the HSL color space.
%
*/
MagickExport void TransformHSL(const Quantum red,const Quantum green,
const Quantum blue,double *hue,double *saturation,double *lightness)
{
MagickRealType
b,
delta,
g,
max,
min,
r;
/*
Convert RGB to HSL colorspace.
*/
assert(hue != (double *) NULL);
assert(saturation != (double *) NULL);
assert(lightness != (double *) NULL);
r=QuantumScale*red;
g=QuantumScale*green;
b=QuantumScale*blue;
max=MagickMax(r,MagickMax(g,b));
min=MagickMin(r,MagickMin(g,b));
*hue=0.0;
*saturation=0.0;
*lightness=(double) ((min+max)/2.0);
delta=max-min;
if (delta == 0.0)
return;
*saturation=(double) (delta/((*lightness < 0.5) ? (min+max) :
(2.0-max-min)));
if (r == max)
*hue=(double) (g == min ? 5.0+(max-b)/delta : 1.0-(max-g)/delta);
else
if (g == max)
*hue=(double) (b == min ? 1.0+(max-r)/delta : 3.0-(max-b)/delta);
else
*hue=(double) (r == min ? 3.0+(max-g)/delta : 5.0-(max-r)/delta);
*hue/=6.0;
}
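/*
  Worked example: pure green (red=0, green=QuantumRange, blue=0) gives
  r=0, g=1, b=0, hence max=1, min=0, lightness=0.5, delta=1, saturation=1;
  since g==max and b==min, hue=(1.0+(max-r)/delta)/6.0 = 2.0/6.0, i.e.
  120 degrees.
*/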
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s l a t e T e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TranslateText() replaces any embedded formatting characters with the
% appropriate image attribute and returns the translated text.
%
% Deprecated, replace with:
%
% InterpretImageProperties(image_info,image,embed_text);
%
% The format of the TranslateText method is:
%
% char *TranslateText(const ImageInfo *image_info,Image *image,
% const char *embed_text)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o embed_text: the address of a character string containing the embedded
% formatting characters.
%
*/
MagickExport char *TranslateText(const ImageInfo *image_info,Image *image,
const char *embed_text)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.6");
return(InterpretImageProperties(image_info,image,embed_text));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, with fuzz set to 10, the color red at intensities of 100 and
% 102 is interpreted as the same color.
%
% The format of the TransparentImage method is:
%
% MagickBooleanType TransparentImage(Image *image,
% const PixelPacket target,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o opacity: the replacement opacity value.
%
*/
MagickExport MagickBooleanType TransparentImage(Image *image,
const PixelPacket target,const Quantum opacity)
{
#define TransparentImageTag "Transparent/Image"
MagickBooleanType
proceed;
ssize_t
y;
/*
Make image color transparent.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.1.0");
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*magick_restrict q;
q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsColorSimilar(image,q,&target) != MagickFalse)
q->opacity=opacity;
q++;
}
if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
break;
proceed=SetImageProgress(image,TransparentImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h i f t I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnshiftImageList() adds the image to the beginning of the list.
%
% Deprecated, replace with:
%
% PrependImageToList(images,CloneImageList(image,exception));
%
% The format of the UnshiftImageList method is:
%
%      unsigned int UnshiftImageList(Image **images,const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image list.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int UnshiftImageList(Image **images,const Image *image,
ExceptionInfo *exception)
{
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
PrependImageToList(images,CloneImageList(image,exception));
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ V a l i d a t e C o l o r m a p I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ValidateColormapIndex() validates the colormap index. If the index does
% not range from 0 to the number of colors in the colormap, an exception is
% issued and 0 is returned.
%
% Deprecated, replace with:
%
% ConstrainColormapIndex(image,index);
%
% The format of the ValidateColormapIndex method is:
%
%      IndexPacket ValidateColormapIndex(Image *image,const size_t index)
%
% A description of each parameter follows:
%
%    o index: Method ValidateColormapIndex returns the colormap index if it
%      is valid, otherwise an exception is issued and 0 is returned.
%
% o image: the image.
%
% o index: This integer is the colormap index.
%
*/
MagickExport IndexPacket ValidateColormapIndex(Image *image,
const size_t index)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.4");
return(ConstrainColormapIndex(image,index));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z o o m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZoomImage() creates a new image that is a scaled size of an existing one.
% It allocates the memory necessary for the new Image structure and returns a
% pointer to the new image. The Point filter gives fast pixel replication,
% Triangle is equivalent to bi-linear interpolation, and Mitchell gives slower,
% very high-quality results.  See Graphics Gems III for details on this
% algorithm.
%
% The filter member of the Image structure specifies which image filter to
% use. Blur specifies the blur factor where > 1 is blurry, < 1 is sharp.
%
% The format of the ZoomImage method is:
%
% Image *ZoomImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: An integer that specifies the number of columns in the zoom
% image.
%
% o rows: An integer that specifies the number of rows in the scaled
% image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ZoomImage(const Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
Image
*zoom_image;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
zoom_image=ResizeImage(image,columns,rows,image->filter,image->blur,
exception);
return(zoom_image);
}
#endif
|
rose_dep_distance.c | /*
* Test dependence distance
*/
void foo()
{
int i;
int a[100];
/* Constant offset */
for (i = 0; i <= 98; i += 1) {
a[i + 3] = a[i - 5] + 1;
}
}
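/* Note on foo(): the write a[i+3] at iteration i1 and the read a[i-5] at
iteration i2 touch the same element when i2 = i1+8, so the loop carries a
flow dependence with constant distance 8. */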
void foo2(int j,int k)
{
int i;
int a[100];
/* Variable offset */
for (i = 0; i <= 98; i += 1) {
a[i + j] = a[i + k] + 1;
}
}
int b[100][100];
void foo3()
{
int i;
int j;
/* Two levels with constant offset */
#pragma omp parallel for private (i,j)
for (i = 1; i <= 99; i += 1) {
for (j = 1; j <= 99; j += 1) {
b[i][j] = b[i][j - 1] + 1;
}
}
}
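/* Note on foo3(): the dependence b[i][j] -> b[i][j-1] has distance vector
(0,1); it is carried only by the inner j loop, which is why the outer i
loop can safely be parallelized above. */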
|
omp_taskloop_grainsize.c | // RUN: %libomp-compile-and-run
// RUN: %libomp-compile && env KMP_TASKLOOP_MIN_TASKS=1 %libomp-run
// REQUIRES: openmp-4.5
// These compilers don't support the taskloop construct
// UNSUPPORTED: gcc-4, gcc-5, icc-16
// GCC 6 has support for taskloops, but at least 6.3.0 is crashing on this test
// UNSUPPORTED: gcc-6
/*
* Test for taskloop
* Method: calculate how many times the iteration space is dispatched
* and judge if each dispatch has the requested grainsize.
* It is possible for two adjacent chunks to be executed by the same thread.
*/
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include "omp_testsuite.h"
#define CFDMAX_SIZE 1120
int test_omp_taskloop_grainsize()
{
int result = 0;
int i, grainsize, count, tmp_count, num_off;
int *tmp, *tids, *tidsArray;
tidsArray = (int *)malloc(sizeof(int) * CFDMAX_SIZE);
tids = tidsArray;
for (grainsize = 1; grainsize < 48; ++grainsize) {
fprintf(stderr, "Grainsize %d\n", grainsize);
count = tmp_count = num_off = 0;
for (i = 0; i < CFDMAX_SIZE; ++i) {
tids[i] = -1;
}
#pragma omp parallel shared(tids)
{
#pragma omp master
#pragma omp taskloop grainsize(grainsize)
for (i = 0; i < CFDMAX_SIZE; i++) {
tids[i] = omp_get_thread_num();
}
}
for (i = 0; i < CFDMAX_SIZE; ++i) {
if (tids[i] == -1) {
fprintf(stderr, " Iteration %d not touched!\n", i);
result++;
}
}
for (i = 0; i < CFDMAX_SIZE - 1; ++i) {
if (tids[i] != tids[i + 1]) {
count++;
}
}
tmp = (int *)malloc(sizeof(int) * (count + 1));
tmp[0] = 1;
for (i = 0; i < CFDMAX_SIZE - 1; ++i) {
if (tmp_count > count) {
printf("--------------------\nTestinternal Error: List too "
"small!!!\n--------------------\n");
break;
}
if (tids[i] != tids[i + 1]) {
tmp_count++;
tmp[tmp_count] = 1;
} else {
tmp[tmp_count]++;
}
}
// is the grainsize clause working?
int num_tasks = CFDMAX_SIZE / grainsize;
int multiple1 = CFDMAX_SIZE / num_tasks;
int multiple2 = CFDMAX_SIZE / num_tasks + 1;
for (i = 0; i < count; i++) {
// it is possible for 2 adjacent chunks to be assigned to the same thread
if (tmp[i] % multiple1 != 0 && tmp[i] % multiple2 != 0) {
num_off++;
}
}
if (num_off > 1) {
fprintf(stderr, " The number of bad chunks is %d\n", num_off);
result++;
} else {
fprintf(stderr, " Everything ok\n");
}
free(tmp);
}
free(tidsArray);
return (result==0);
}
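/*
* A worked instance of the chunk check above (numbers derived from the code,
* not from the OpenMP spec): with CFDMAX_SIZE=1120 and grainsize=13,
* num_tasks = 1120/13 = 86, multiple1 = 1120/86 = 13 and multiple2 = 14, so
* every merged chunk length recorded in tmp[] must be a multiple of 13 or
* 14; any other length increments num_off.
*/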
int main()
{
int i;
int num_failed=0;
for (i = 0; i < REPETITIONS; i++) {
if (!test_omp_taskloop_grainsize()) {
num_failed++;
}
}
return num_failed;
}
|
GB_unaryop__abs_uint64_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint64_uint8
// op(A') function: GB_tran__abs_uint64_uint8
// C type: uint64_t
// A type: uint8_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
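// Expansion sketch: with the definitions above, GB_CAST_OP (p, p) expands
// to the following three statements:
//
//      uint8_t aij = Ax [p] ;              // GB_GETA
//      uint64_t x = (uint64_t) aij ;       // GB_CASTING
//      Cx [p] = x ;                        // GB_OP applied to GB_CX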
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_uint64_uint8
(
uint64_t *restrict Cx,
const uint8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_uint64_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__sinh_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__sinh_fc32_fc32
// op(A') function: GB_unop_tran__sinh_fc32_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = csinhf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = csinhf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = csinhf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SINH || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__sinh_fc32_fc32
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = csinhf (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = csinhf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__sinh_fc32_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances.
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp; it is a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
void operator++() {
++Depth;
++AddedLevels;
}
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
void setAddedDepth(unsigned D) {
Depth = Depth - AddedLevels + D;
AddedLevels = D;
}
unsigned getDepth() const { return Depth; }
unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};
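/// A usage sketch (the variable name is hypothetical): bump the depth while
/// a template parameter list is parsed and let the destructor restore the
/// original depth on scope exit:
///
/// \code
///   TemplateParameterDepthRAII DepthGuard(TemplateParameterDepth);
///   ++DepthGuard;  // one additional template-parameter level
///   // ... parse at the deeper level; depth reverts when DepthGuard dies.
/// \endcode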
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
/// Flags used to rank candidate template names when there is more than one
/// '<' in a scope.
enum Priority : unsigned short {
/// A non-dependent name that is a potential typo for a template name.
PotentialTypo = 0x0,
/// A dependent name that might instantiate to a template-name.
DependentName = 0x2,
/// A space appears before the '<' token.
SpaceBeforeLess = 0x0,
/// No space appears before the '<' token.
NoSpaceBeforeLess = 0x1,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
};
struct Loc {
Expr *TemplateName;
SourceLocation LessLoc;
AngleBracketTracker::Priority Priority;
unsigned short ParenCount, BracketCount, BraceCount;
bool isActive(Parser &P) const {
return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
P.BraceCount == BraceCount;
}
bool isActiveOrNested(Parser &P) const {
return isActive(P) || P.ParenCount > ParenCount ||
P.BracketCount > BracketCount || P.BraceCount > BraceCount;
}
};
SmallVector<Loc, 8> Locs;
/// Add an expression that might have been intended to be a template name.
/// In the case of ambiguity, we arbitrarily select the innermost such
/// expression, for example in 'foo < bar < baz', 'bar' is the current
/// candidate. No attempt is made to track that 'foo' is also a candidate
/// for the case where we see a second suspicious '>' token.
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
Priority Prio) {
if (!Locs.empty() && Locs.back().isActive(P)) {
if (Locs.back().Priority <= Prio) {
Locs.back().TemplateName = TemplateName;
Locs.back().LessLoc = LessLoc;
Locs.back().Priority = Prio;
}
} else {
Locs.push_back({TemplateName, LessLoc, Prio,
P.ParenCount, P.BracketCount, P.BraceCount});
}
}
/// Mark the current potential missing template location as having been
/// handled (this happens if we pass a "corresponding" '>' or '>>' token
/// or leave a bracket scope).
void clear(Parser &P) {
while (!Locs.empty() && Locs.back().isActiveOrNested(P))
Locs.pop_back();
}
/// Get the current enclosing expression that might have been intended to be
/// a template name.
Loc *getCurrent(Parser &P) {
if (!Locs.empty() && Locs.back().isActive(P))
return &Locs.back();
return nullptr;
}
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
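/// A minimal usage sketch (illustrative, not from the original source):
/// consume an optional comma and remember where it was.
/// \code
///   SourceLocation CommaLoc;
///   if (TryConsumeToken(tok::comma, CommaLoc)) {
///     // The ',' was consumed; CommaLoc points at it.
///   }
/// \endcode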
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or a token commonly mistyped as
/// '=' (such as '=='). For such typos, a fix-it suggesting '=' is emitted.
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
Token Next = Tok;
PP.EnterToken(Consumed, /*IsReinject*/true);
PP.Lex(Tok);
PP.EnterToken(Next, /*IsReinject*/true);
}
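/// Illustrative sketch (not in the original header): speculatively consume a
/// token and then restore the stream to its prior state.
/// \code
///   Token Saved = Tok;     // remember the current token
///   ConsumeAnyToken();     // speculatively consume it
///   UnconsumeToken(Saved); // put it back; Tok is 'Saved' again
/// \endcode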
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount) {
AngleBrackets.clear(*this);
--ParenCount; // Don't let unbalanced )'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount) {
AngleBrackets.clear(*this);
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount) {
AngleBrackets.clear(*this);
--BraceCount; // Don't let unbalanced }'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched a specific position in the grammar, provide code-completion
/// results based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
if (N == 0 || Tok.is(tok::eof)) return Tok;
return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
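/// Illustrative lookahead sketch (not from the original source): peek two
/// tokens ahead without consuming anything, e.g. to spot 'name::name'.
/// \code
///   if (Tok.is(tok::identifier) && NextToken().is(tok::coloncolon) &&
///       GetLookAheadToken(2).is(tok::identifier)) {
///     // Looks like a qualified name; nothing has been consumed yet.
///   }
/// \endcode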
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(const Token &Tok) {
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
static void setTypeAnnotation(Token &Tok, ParsedType T) {
Tok.setAnnotationValue(T.getAsOpaquePtr());
}
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}
static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
Tok.setAnnotationValue(ND);
}
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}
static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
Tok.setAnnotationValue(ND);
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
bool MightBeCXXScopeToken() {
return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
(Tok.is(tok::annot_template_id) &&
NextToken().is(tok::coloncolon)) ||
Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super);
}
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
}
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC);
if (Tok.isAnnotation())
return false;
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
PreferredTypeBuilder PrevPreferredType;
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
bool isActive;
public:
explicit TentativeParsingAction(Parser& p) : P(p) {
PrevPreferredType = P.PreferredType;
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
void Commit() {
assert(isActive && "Parsing action was finished!");
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
void Revert() {
assert(isActive && "Parsing action was finished!");
P.PP.Backtrack();
P.PreferredType = PrevPreferredType;
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
~RevertingTentativeParsingAction() { Revert(); }
};
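/// A disambiguation sketch (illustrative; 'looksLikeTypeId' is a hypothetical
/// helper): look ahead past a '(' and rewind unconditionally on scope exit.
/// \code
///   bool IsTypeId;
///   {
///     RevertingTentativeParsingAction TPA(*this);
///     ConsumeParen();
///     IsTypeId = looksLikeTypeId(); // hypothetical predicate
///   } // TPA's destructor calls Revert(); no tokens remain consumed
/// \endcode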
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
Decl *DC;
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
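/// Recovery sketch (illustrative): require a closing ')' and then a
/// terminating ';', bailing out on malformed input.
/// \code
///   if (ExpectAndConsume(tok::r_paren))
///     return StmtError();
///   if (ExpectAndConsumeSemi(diag::err_expected_semi_declaration))
///     return StmtError();
/// \endcode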
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
Parser *Self;
ParseScope(const ParseScope &) = delete;
void operator=(const ParseScope &) = delete;
public:
// ParseScope - Construct a new object to manage a scope in the
// parser Self, where the new Scope is created with the flags
// ScopeFlags, but only when EnteredScope is true and we aren't about
// to enter a compound statement.
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
bool BeforeCompoundStmt = false)
: Self(Self) {
if (EnteredScope && !BeforeCompoundStmt)
Self->EnterScope(ScopeFlags);
else {
if (BeforeCompoundStmt)
Self->incrementMSManglingNumber();
this->Self = nullptr;
}
}
// Exit - Exit the scope associated with this object now, rather
// than waiting until the object is destroyed.
void Exit() {
if (Self) {
Self->ExitScope();
Self = nullptr;
}
}
~ParseScope() {
Exit();
}
};
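/// A typical usage sketch (illustrative): open a declaration scope for the
/// duration of a block; the destructor (or an explicit Exit()) closes it.
/// \code
///   {
///     ParseScope BodyScope(this, Scope::DeclScope);
///     // ... parse constructs that belong to the new scope ...
///   } // scope exited here
/// \endcode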
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
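/// Error-recovery sketch (illustrative): skip ahead to a ')' without eating
/// it, stopping early at any ';', with flags combined via operator|.
/// \code
///   SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
/// \endcode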
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no
/// MisleadingIndentationChecker active on an else, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
virtual void ParseLexedPragmas();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
void ParseLexedPragmas() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
IdentifierInfo *MacroII = nullptr;
SourceLocation AttrNameLoc;
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
class LateParsedPragma : public LateParsedDeclaration {
Parser *Self = nullptr;
AccessSpecifier AS = AS_none;
CachedTokens Toks;
public:
explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
: Self(P), AS(AS) {}
void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
const CachedTokens &toks() const { return Toks; }
AccessSpecifier getAccessSpecifier() const { return AS; }
void ParseLexedPragmas() override;
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
bool parseSoon() { return ParseSoon; }
private:
bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
Decl *D;
CachedTokens Toks;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
explicit LexedMethod(Parser* P, Decl *MD)
: Self(P), D(MD), TemplateScope(false) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), TemplateScope(false),
ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser* Self;
/// Method - The method declaration.
Decl *Method;
/// Whether this member function had an associated template
/// scope. When true, Method is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - While parsing a top-level (non-nested)
/// C++ class, method declarations that contain parts which cannot be parsed
/// until after the definition is completed (C++ [class.mem]p2), along with
/// any attached inline definitions, are stored here together with the cached
/// tokens that will later be parsed to create those entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), TemplateScope(false),
IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class had an associated template
/// scope. When true, TagOrTemplate is a template declaration;
/// otherwise, it is a tag declaration.
bool TemplateScope : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
Parser &P;
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
if (!Popped)
P.PopParsingClass(State);
}
};
/// Contains any template-specific information that has been
/// parsed prior to parsing declaration specifiers.
struct ParsedTemplateInfo {
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// The kind of template we are parsing.
enum {
/// We are not parsing a template at all.
NonTemplate = 0,
/// We are parsing a template declaration.
Template,
/// We are parsing an explicit specialization.
ExplicitSpecialization,
/// We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// The location of the 'extern' keyword, if any, for an explicit
/// instantiation
SourceLocation ExternLoc;
/// The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
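/// Construction sketch (illustrative; 'ParamLists', 'ExternLoc' and
/// 'TemplateLoc' are hypothetical locals): the three constructors map to the
/// three template-parsing states.
/// \code
///   ParsedTemplateInfo NotATemplate;                          // NonTemplate
///   ParsedTemplateInfo Tmpl(&ParamLists, /*isSpecialization=*/false);
///   ParsedTemplateInfo Inst(ExternLoc, TemplateLoc);  // ExplicitInstantiation
/// \endcode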
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
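/// Caching sketch (illustrative): stash the tokens of a parenthesized
/// default argument for late parsing, leaving the ')' in the stream.
/// \code
///   CachedTokens Toks;
///   ConsumeAndStoreUntil(tok::r_paren, Toks, /*StopAtSemi=*/true,
///                        /*ConsumeFinalToken=*/false);
/// \endcode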
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
struct ParsedAttributesWithRange : ParsedAttributes {
ParsedAttributesWithRange(AttributeFactory &factory)
: ParsedAttributes(factory) {}
void clear() {
ParsedAttributes::clear();
Range = SourceRange();
}
SourceRange Range;
};
struct ParsedAttributesViewWithRange : ParsedAttributesView {
ParsedAttributesViewWithRange() : ParsedAttributesView() {}
void clearListOnly() {
ParsedAttributesView::clearListOnly();
Range = SourceRange();
}
SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
struct ObjCImplParsingDataRAII {
Parser &P;
Decl *Dcl;
bool HasCFunction;
typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
LateParsedObjCMethodContainer LateParsedObjCMethods;
ObjCImplParsingDataRAII(Parser &parser, Decl *D)
: P(parser), Dcl(D), HasCFunction(false) {
P.CurParsedObjCImpl = this;
Finished = false;
}
~ObjCImplParsingDataRAII();
void finish(SourceRange AtEnd);
bool isFinished() const { return Finished; }
private:
bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-C context-sensitive keyword recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult
ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
/// Control what ParseCastExpression will parse.
enum CastParseKind {
AnyCastExpr = 0,
UnaryExprOnly,
PrimaryExprOnly
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
if (auto *Info = AngleBrackets.getCurrent(*this))
return checkPotentialAngleBracketDelimiter(*Info, OpToken);
return false;
}
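/// Illustrative sketch (not from the original source; the exact recovery
/// behavior is an assumption): when a closing delimiter is reached while a
/// potential 'name < expr' is still pending, ask the tracker whether the '<'
/// was probably meant as a template-argument-list delimiter.
/// \code
///   if (Tok.is(tok::greater) && checkPotentialAngleBracketDelimiter(Tok)) {
///     // Treated as a likely template-id typo; recovery already performed.
///   }
/// \endcode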
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHasErrors,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false,
bool InUsingDeclaration = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C++ Concepts
ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
ExprResult ParseInitializer() {
if (Tok.isNot(tok::l_brace))
return ParseAssignmentExpression();
return ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator(
llvm::function_ref<void(const Designation &)> CodeCompleteCB);
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used).
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior that this __if_exists or __if_not_exists
/// block should have.
IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out; there are other significant restrictions on specifiers that
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
return false;
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return true;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_type_specifier:
return true;
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
SourceLocation ColonLoc;
ExprResult RangeExpr;
bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
struct ForRangeInfo : ForRangeInit {
StmtResult LoopVar;
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier that isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationSpecifier() == TPResult::True;
return isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationStatement();
return isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
if (getLangOpts().OpenMP)
Actions.startOpenMPLoop();
if (getLangOpts().CPlusPlus)
return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
return isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdInParens, isAmbiguous);
isAmbiguous = false;
return isTypeSpecifierQualifier();
}
bool isTypeIdInParens() {
bool isAmbiguous;
return isTypeIdInParens(isAmbiguous);
}
/// Checks whether the current tokens form a type-id or an expression.
/// It is similar to isTypeIdInParens but does not assume that the type-id
/// is in parentheses.
bool isTypeIdUnambiguously() {
bool IsAmbiguous;
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
bool isAmbiguous;
return isCXXTypeId(Context, isAmbiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPResult::True if this token starts an expression,
/// \c TPResult::False if this token starts a type-specifier-seq, or
/// \c TPResult::Ambiguous if it cannot tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an '(' after an 'explicit' keyword is part of a C++20
/// 'explicit(bool)' declaration, in earlier language modes where that is an
/// extension.
TPResult isExplicitBool();
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
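// A sketch of the typical call pattern (illustrative only, assuming the
// TentativeParsingAction RAII helper declared earlier in this class):
//
//   TentativeParsingAction PA(*this);
//   TPResult TPR = TryParseSimpleDeclaration(/*AllowForRangeDecl=*/false);
//   PA.Revert();                 // rewind to the saved token position
//   if (TPR == TPResult::True) {
//     // commit to parsing a declaration for real
//   }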
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
/// Try to skip a possibly empty sequence of 'attribute-specifier's without
/// full validation of the syntactic structure of attributes.
bool TrySkipAttributes();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
assert(Tok.is(tok::l_square));
if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
return false;
return DiagnoseProhibitedCXX11Attribute();
}
bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation) {
if (!standardAttributesAllowed())
return;
if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
Tok.isNot(tok::kw_alignas))
return;
DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clear();
}
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear in certain syntactic locations
// which the standard permits but we don't support yet, for example,
// attributes that appertain to decl-specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
ParsedAttributes attrs(AttrFactory);
SourceLocation endLoc;
ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute))
ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseCXX11Attributes(Declarator &D) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation endLoc;
ParseCXX11Attributes(attrs, &endLoc);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrsWithRange(AttrFactory);
ParseCXX11Attributes(attrsWithRange, endLoc);
attrs.takeAllFrom(attrsWithRange);
}
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (standardAttributesAllowed() &&
isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr) {
const auto &LO = getLangOpts();
if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses the opencl_unroll_hint attribute if the language is OpenCL v2.0
/// or higher.
/// \return false if an error occurs.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
if (getLangOpts().OpenCL)
return ParseOpenCLUnrollHintAttribute(Attrs);
return true;
}
/// Parses the opencl_unroll_hint attribute.
/// \return false if an error occurs.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseSwiftNewtypeAttribute(IdentifierInfo &SwiftNewtype,
SourceLocation SwiftNewtypeLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
void ParsePtrauthQualifier(ParsedAttributes &Attrs);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
bool EnteredScope;
bool CreatedScope;
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
DeclaratorContext DeclaratorContext,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc;
SourceLocation InlineLoc;
SourceLocation IdentLoc;
IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
struct UsingDeclarator {
SourceLocation TypenameLoc;
CXXScopeSpec SS;
UnqualifiedId Name;
SourceLocation EllipsisLoc;
void clear() {
TypenameLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool
ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHadErrors,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse a property kind into \p TIProperty for the selector set \p Set and
/// selector \p Selector.
void parseOMPTraitPropertyKind(OMPTraitInfo::OMPTraitProperty &TIProperty,
llvm::omp::TraitSet Set,
llvm::omp::TraitSelector Selector,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector kind into \p TISelector for the selector set \p Set.
void parseOMPTraitSelectorKind(OMPTraitInfo::OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector set kind into \p TISet.
void parseOMPTraitSetKind(OMPTraitInfo::OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context property.
void parseOMPContextProperty(OMPTraitInfo::OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context selector.
void parseOMPContextSelector(OMPTraitInfo::OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &SeenSelectors);
/// Parses an OpenMP context selector set.
void parseOMPContextSelectorSet(OMPTraitInfo::OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &SeenSets);
/// Parses OpenMP context selectors.
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses a clause of kind \a CKind for a directive of kind \a DKind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true if this is the first clause of kind \a CKind
/// in the current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param DKind Directive kind.
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses a simple expression in parens for single-expression clauses of
/// OpenMP constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *TailExpr = nullptr;
SourceLocation ColonLoc;
SourceLocation RLoc;
CXXScopeSpec ReductionOrMapperIdScopeSpec;
DeclarationNameInfo ReductionOrMapperId;
int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
///< lastprivate clause.
SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers>
MapTypeModifiers;
SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers>
MapTypeModifiersLoc;
bool IsMapTypeImplicit = false;
SourceLocation ExtraModifierLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
bool ObjectHadErrors, bool EnteringContext,
bool AllowDestructorName, bool AllowConstructorName,
bool AllowDeductionGuide,
SourceLocation *TemplateKWLoc, UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
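/// For example (illustrative): 'map(always, close, to : a, b)' carries the
/// map-type-modifiers 'always' and 'close' and the map-type 'to'.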
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
TPResult isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
bool isTypeConstraintAnnotation();
bool TryAnnotateTypeConstraint();
NamedDecl *
ParseConstrainedTemplateTypeParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true,
bool TypeConstraint = false);
void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
/// Parse the given string as a type.
///
/// This is a dangerous utility function currently employed only by API notes.
/// It is not a general entry-point for safely parsing types from strings.
///
/// \param typeStr The string to be parsed as a type.
/// \param context The name of the context in which this string is being
/// parsed, which will be used in diagnostics.
/// \param includeLoc The location at which this parse was triggered.
TypeResult parseTypeFromString(StringRef typeStr, StringRef context,
SourceLocation includeLoc);
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
ExprResult ParseBuiltinPtrauthTypeDiscriminator();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
class GNUAsmQualifiers {
unsigned Qualifiers = AQ_unspecified;
public:
enum AQ {
AQ_unspecified = 0,
AQ_volatile = 1,
AQ_inline = 2,
AQ_goto = 4,
};
static const char *getQualifierName(AQ Qualifier);
bool setAsmQualifier(AQ Qualifier);
inline bool isVolatile() const { return Qualifiers & AQ_volatile; }
inline bool isInline() const { return Qualifiers & AQ_inline; }
inline bool isGoto() const { return Qualifiers & AQ_goto; }
};
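// For example (illustrative), parsing the qualifiers in
//     asm volatile goto ("jmp %l0" : : : : done);
// yields Qualifiers == (AQ_volatile | AQ_goto).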
bool isGCCAsmStatement(const Token &TokAfterAsm) const;
bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};
} // end namespace clang
#endif
|
omp_bug5fix.c | /******************************************************************************
* FILE: omp_bug5fix.c
* DESCRIPTION:
* The problem in omp_bug5.c is that the first thread acquires locka and then
* tries to get lockb before releasing locka. Meanwhile, the second thread
* has acquired lockb and then tries to get locka before releasing lockb.
* This solution overcomes the deadlock by using locks correctly.
* AUTHOR: Blaise Barney 01/29/04
* LAST REVISED: 08/15/11
******************************************************************************/
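/* For contrast, a sketch of the deadlocking order in omp_bug5.c (not the
   literal original code): each thread requests its second lock while still
   holding its first one:

       Thread 1                      Thread 2
       omp_set_lock(&locka);         omp_set_lock(&lockb);
       omp_set_lock(&lockb);  <-->   omp_set_lock(&locka);   deadlock

   The version below releases each lock before acquiring the other, so no
   thread ever blocks while holding a lock. */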
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 1000000
#define PI 3.1415926535
#define DELTA .01415926535
int main (int argc, char *argv[])
{
int nthreads, tid, i;
float a[N], b[N];
omp_lock_t locka, lockb;
/* Initialize the locks */
omp_init_lock(&locka);
omp_init_lock(&lockb);
/* Initialize the arrays */
for (i=0; i<N; i++) {
a[i]=0;
b[i]=0;
}
/* Fork a team of threads giving them their own copies of variables */
#pragma omp parallel shared(a, b, nthreads, locka, lockb) private(tid, i)
{
/* Obtain thread number and number of threads */
tid = omp_get_thread_num();
#pragma omp master
{
nthreads = omp_get_num_threads();
printf("Number of threads = %d\n", nthreads);
}
printf("Thread %d starting...\n", tid);
#pragma omp barrier
#pragma omp sections nowait
{
#pragma omp section
{
omp_set_lock(&locka);
printf("Thread %d updating a[]\n",tid);
for (i=0; i<N; i++)
a[i] += DELTA * i;
omp_unset_lock(&locka);
omp_set_lock(&lockb);
printf("Thread %d updating b[]\n",tid);
for (i=0; i<N; i++)
b[i] += DELTA + i;
omp_unset_lock(&lockb);
}
#pragma omp section
{
omp_set_lock(&lockb);
printf("Thread %d updating b[]\n",tid);
for (i=0; i<N; i++)
b[i] += PI * i;
omp_unset_lock(&lockb);
omp_set_lock(&locka);
printf("Thread %d updating a[]\n",tid);
for (i=0; i<N; i++)
a[i] += PI + i;
omp_unset_lock(&locka);
}
} /* end of sections */
} /* end of parallel region */
printf("Sample results: %f %f %f %f\n",a[0],b[0],a[999999],b[999999]);
}
|
DRB059-lastprivate-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Using lastprivate() to resolve an output dependence.
Semantics of lastprivate (x):
causes the corresponding original list item to be updated after the end of the region.
The compiler/runtime copies the local value back to the shared one within the last iteration.
*/
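/*
A hedged sketch of what lastprivate(x) amounts to here (not the code the
compiler literally emits; x_priv is a hypothetical name):

    int x_priv;                      // per-thread private copy
    for ( each i assigned to this thread )
        x_priv = i;
    if ( this thread ran i == 99 )   // sequentially last iteration
        x = x_priv;                  // copy back to the shared x

Hence the printf in foo() reliably prints x=99.
*/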
#include "omprace.h"
#include <omp.h>
#include <stdio.h>
void foo()
{
int i,x;
#pragma omp parallel for private (i) lastprivate (x)
for (i=0;i<100;i++)
x=i;
printf("x=%d",x);
}
int main()
{
omprace_init();
foo();
omprace_fini();
return 0;
}
|
GB_binop__ge_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__ge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__ge_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_uint8)
// A*D function (colscale): GB (_AxD__ge_uint8)
// D*A function (rowscale): GB (_DxB__ge_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_uint8)
// C=scalar+B GB (_bind1st__ge_uint8)
// C=scalar+B' GB (_bind1st_tran__ge_uint8)
// C=A+scalar GB (_bind2nd__ge_uint8)
// C=A'+scalar GB (_bind2nd_tran__ge_uint8)
// C type: bool
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_UINT8 || GxB_NO_GE_UINT8)
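// To illustrate the plumbing above (informal sketch): an inner loop written as
//      GB_GETA (aij, Ax, p) ;
//      GB_GETB (bij, Bx, p) ;
//      GB_BINOP (GB_CX (p), aij, bij, i, j) ;
// expands here to
//      uint8_t aij = Ax [p] ;
//      uint8_t bij = Bx [p] ;
//      Cx [p] = (aij >= bij) ;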
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__ge_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ge_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ge_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ge_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ge_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ge_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__ge_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ge_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__ge_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ge_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ge_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = Bx [p] ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ge_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = Ax [p] ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__ge_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__ge_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
trsm_c_sky_u_lo_col_conj.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
ALPHA_INT num_thread = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
{
for (ALPHA_INT c = A->cols - 1; c >= 0; c--)
{
ALPHA_Complex temp = {.real = 0.f, .imag = 0.f};
for (ALPHA_INT ic = A->cols - 1; ic > c; ic--)
{
ALPHA_INT start = A->pointers[ic];
ALPHA_INT end = A->pointers[ic + 1];
ALPHA_INT eles_num = ic - c;
if(end - eles_num - 1 >= start)
{
ALPHA_Complex cv = A->values[end - eles_num - 1];
alpha_conj(cv, cv);
alpha_madde(temp, cv, y[out_y_col * ldy + ic]);
}
}
ALPHA_Complex t;
alpha_mul(t, alpha, x[out_y_col * ldx + c]);
alpha_sub(y[out_y_col * ldy + c], t, temp);
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
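/* A sketch of the skyline (SKY) indexing assumed above (inferred from the
 * code, not from alphasparse documentation): pointers[ic]..pointers[ic+1]
 * delimit the stored entries of column ic, ordered so that values[end - 1]
 * is the diagonal A(ic,ic) and values[end - 1 - k] is A(ic - k, ic). The
 * backward substitution needs the conjugate of A(c, ic) for ic > c, i.e.
 * k = ic - c = eles_num, hence the access values[end - eles_num - 1]; the
 * guard "end - eles_num - 1 >= start" skips entries that fall outside
 * column ic's stored skyline (implicit zeros). */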
|
ADR_SUPGassembler_C_omp.c | /* This file is part of redbKIT.
* Copyright (c) 2016, Ecole Polytechnique Federale de Lausanne (EPFL)
* Author: Federico Negri <federico.negri@epfl.ch>
*/
#include "mex.h"
#include <stdio.h>
#include <math.h>
#include "blas.h"
#include <string.h>
#include "../../Core/Tools.h"
#define INVJAC(i,j,k) invjac[i+(j+k*dim)*noe]
#define GRADREFPHI(i,j,k) gradrefphi[i+(j+k*NumQuadPoints)*nln]
#ifdef _OPENMP
#include <omp.h>
#else
#warning "OpenMP not enabled. Compile with mex ADR_SUPGassembler_C_omp.c CFLAGS="\$CFLAGS -fopenmp" LDFLAGS="\$LDFLAGS -fopenmp""
#endif
void mexFunction(int nlhs,mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
/* Check for proper number of arguments. */
if(nrhs!=14) {
mexErrMsgTxt("14 inputs are required.");
} else if(nlhs>6) {
mexErrMsgTxt("Too many output arguments.");
}
double* dim_ptr = mxGetPr(prhs[0]);
int dim = (int)(dim_ptr[0]);
int noe = mxGetN(prhs[3]);
double* nln_ptr = mxGetPr(prhs[4]);
int nln = (int)(nln_ptr[0]);
int numRowsElements = mxGetM(prhs[3]);
int nln2 = nln*nln;
double* tmp_ptr1 = mxGetPr(prhs[2]);
double dt = tmp_ptr1[0];
/**/
plhs[0] = mxCreateDoubleMatrix(nln2*noe,1, mxREAL);
plhs[1] = mxCreateDoubleMatrix(nln2*noe,1, mxREAL);
plhs[2] = mxCreateDoubleMatrix(nln2*noe,1, mxREAL);
plhs[3] = mxCreateDoubleMatrix(nln2*noe,1, mxREAL);
plhs[4] = mxCreateDoubleMatrix(nln*noe,1, mxREAL);
plhs[5] = mxCreateDoubleMatrix(nln*noe,1, mxREAL);
double* myArows = mxGetPr(plhs[0]);
double* myAcols = mxGetPr(plhs[1]);
double* myAcoef = mxGetPr(plhs[2]);
double* myMcoef = mxGetPr(plhs[3]);
double* myRrows = mxGetPr(plhs[4]);
double* myRcoef = mxGetPr(plhs[5]);
/* copy the string data from prhs[1] into a C string. */
char *StabType = mxArrayToString(prhs[1]);
bool flag_t = false;
if (strcmp(StabType, "SUPGt")==0)
{
flag_t = true;
}
mxFree(StabType);
/* Local mass matrix (computed only once) with quadrature nodes */
double LocalMass[nln][nln];
int q;
int NumQuadPoints = mxGetN(prhs[9]);
double* mu = mxGetPr(prhs[5]);
double* conv_field = mxGetPr(prhs[6]);
double* si = mxGetPr(prhs[7]);
double* f = mxGetPr(prhs[8]);
double* w = mxGetPr(prhs[9]);
double* invjac = mxGetPr(prhs[10]);
double* detjac = mxGetPr(prhs[11]);
double* phi = mxGetPr(prhs[12]);
double* gradrefphi = mxGetPr(prhs[13]);
int l,k;
double* elements = mxGetPr(prhs[3]);
/* Assembly: loop over the elements */
int ie;
#pragma omp parallel for shared(invjac,mu,conv_field,si,f,detjac,elements, myRrows, myRcoef,myAcols, myArows, myAcoef, myMcoef) private(ie,k,l,q) firstprivate(phi,gradrefphi, w, numRowsElements, nln2, nln)
for (ie = 0; ie < noe; ie = ie + 1 )
{
double gradphi[dim][nln][NumQuadPoints];
int d1, d2;
for (k = 0; k < nln; k = k + 1 )
{
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
gradphi[d1][k][q] = 0;
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
gradphi[d1][k][q] = gradphi[d1][k][q] + INVJAC(ie,d1,d2)*GRADREFPHI(k,q,d2);
}
}
}
}
/* compute metric tensors G and g */
double G[dim][dim];
double g[dim];
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
g[d1] = 0;
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
G[d1][d2] = 0.0;
int d3;
for (d3 = 0; d3 < dim; d3 = d3 + 1 )
{
G[d1][d2] += INVJAC(ie,d1,d3) * INVJAC(ie,d2,d3);
}
g[d1] = g[d1] + INVJAC(ie,d1,d2);
}
}
double traceGtG = Mdot(dim, G, G);
double tauK[NumQuadPoints];
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
double b_hq[dim];
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
b_hq[d1] = conv_field[ie+(q+d1*NumQuadPoints)*noe];
}
double G_U_hq[dim];
MatrixVector(dim, dim, G, b_hq, G_U_hq);
tauK[q] = pow( flag_t * 4/(dt*dt) + ScalarProduct(dim, b_hq, G_U_hq) + 9*mu[ie+q*noe]*mu[ie+q*noe]*traceGtG, -0.5);
}
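/* The parameter computed above is the metric-tensor form of the SUPG
 * stabilization (a reading of this code, not a quote from redbKIT docs):
 *
 *   tau_K(q) = ( flag_t * 4/dt^2 + b_h . (G b_h) + 9 mu^2 (G:G) )^(-1/2)
 *
 * with G = J^{-1} J^{-T} the element metric tensor (G[d1][d2] above),
 * b_h the convective field at quadrature node q, and G:G = traceGtG. */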
int iii = 0;
int ii = 0;
int a, b;
double bh_gradPHI[nln][NumQuadPoints];
for (k = 0; k < nln; k = k + 1 )
{
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
bh_gradPHI[k][q] = 0;
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
bh_gradPHI[k][q] += conv_field[ie+(q+d1*NumQuadPoints)*noe] * gradphi[d1][k][q];
}
}
}
/* a test, b trial */
for (a = 0; a < nln; a = a + 1 )
{
for (b = 0; b < nln; b = b + 1 )
{
double aloc = 0;
double mloc = 0;
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
aloc += (bh_gradPHI[b][q] + si[ie+q*noe] * phi[b+q*nln]) * bh_gradPHI[a][q] * tauK[q] * w[q];
mloc += phi[b+q*nln] * bh_gradPHI[a][q] * tauK[q] * w[q];
}
myArows[ie*nln2+iii] = elements[a+ie*numRowsElements];
myAcols[ie*nln2+iii] = elements[b+ie*numRowsElements];
myAcoef[ie*nln2+iii] = aloc*detjac[ie];
myMcoef[ie*nln2+iii] = mloc*detjac[ie];
iii = iii + 1;
}
double floc = 0;
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
floc += ( bh_gradPHI[a][q] * f[ie+q*noe] * tauK[q] ) * w[q];
}
myRrows[ie*nln+ii] = elements[a+ie*numRowsElements];
myRcoef[ie*nln+ii] = floc*detjac[ie];
ii = ii + 1;
}
}
}
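/* Note: the kernel emits COO triplets (myArows, myAcols, myAcoef and
   myMcoef, nln^2 entries per element) plus a right-hand-side contribution
   (myRrows, myRcoef, nln entries per element); the MATLAB caller is
   presumably expected to assemble them with sparse(rows, cols, coefs),
   which sums duplicate entries coming from shared mesh nodes. */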
|
pcpdlpverifydsaca.c | /*******************************************************************************
* Copyright 2005-2019 Intel Corporation
* All Rights Reserved.
*
* If this software was obtained under the Intel Simplified Software License,
* the following terms apply:
*
* The source code, information and material ("Material") contained herein is
* owned by Intel Corporation or its suppliers or licensors, and title to such
* Material remains with Intel Corporation or its suppliers or licensors. The
* Material contains proprietary information of Intel or its suppliers and
* licensors. The Material is protected by worldwide copyright laws and treaty
* provisions. No part of the Material may be used, copied, reproduced,
* modified, published, uploaded, posted, transmitted, distributed or disclosed
* in any way without Intel's prior express written permission. No license under
* any patent, copyright or other intellectual property rights in the Material
* is granted to or conferred upon you, either expressly, by implication,
* inducement, estoppel or otherwise. Any license under such intellectual
* property rights must be express and approved by Intel in writing.
*
* Unless otherwise agreed by Intel in writing, you may not remove or alter this
* notice or any other notice embedded in Materials by Intel or Intel's
* suppliers or licensors in any way.
*
*
* If this software was obtained under the Apache License, Version 2.0 (the
* "License"), the following terms apply:
*
* You may not use this file except in compliance with the License. You may
* obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/*
//
// Purpose:
// Cryptography Primitive.
// DL over Prime Finite Field (Verify, DSA version)
//
// Contents:
// ippsDLPVerifyDSA()
//
//
*/
#include "owndefs.h"
#include "owncp.h"
#include "pcpdlp.h"
/*F*
// Name: ippsDLPVerifyDSA
//
// Purpose: Verify Signature (DSA version)
//
// Returns: Reason:
// ippStsNullPtrErr NULL == pDL
// NULL == pMsgDigest
// NULL == pSignR
// NULL == pSignS
// NULL == pResult
//
// ippStsContextMatchErr illegal pDL->idCtx
// illegal pMsgDigest->idCtx
// illegal pSignR->idCtx
// illegal pSignS->idCtx
//
// ippStsIncompleteContextErr
// incomplete context
//
// ippStsMessageErr MsgDigest >= R
// MsgDigest < 0
//
// ippStsNoErr no errors
//
// Parameters:
// pMsgDigest pointer to the message representative to be signed
// pSignR,pSignS pointer to the signature
// pResult pointer to the result: IppSignIsValid/IppSignIsInvalid
// pDSA pointer to the DL context
//
// Primitive sequence call:
// 1) set up domain parameters
// 2) set up (signatory's) public key
*F*/
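/*
// Summary of the verification math implemented below (standard DSA, in the
// notation of this file, with group order R and prime modulus P):
//    W  = SignS^(-1) (mod R)
//    U1 = MsgDigest * W (mod R)
//    U2 = SignR * W (mod R)
//    V  = ( (G^U1) * (Y^U2) (mod P) ) (mod R)
// and the signature is accepted iff V == SignR.
*/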
#if !defined(_OPENMP)
IPPFUN(IppStatus, ippsDLPVerifyDSA,(const IppsBigNumState* pMsgDigest,
const IppsBigNumState* pSignR, const IppsBigNumState* pSignS,
IppDLResult* pResult,
IppsDLPState* pDL))
{
/* test context*/
IPP_BAD_PTR2_RET(pDL,pResult);
pDL = (IppsDLPState*)( IPP_ALIGNED_PTR(pDL, DLP_ALIGNMENT) );
IPP_BADARG_RET(!DLP_VALID_ID(pDL), ippStsContextMatchErr);
/* test operation flag */
IPP_BADARG_RET(!DLP_COMPLETE(pDL), ippStsIncompleteContextErr);
/* test message representative */
IPP_BAD_PTR1_RET(pMsgDigest);
pMsgDigest = (IppsBigNumState*)( IPP_ALIGNED_PTR(pMsgDigest, BN_ALIGNMENT) );
IPP_BADARG_RET(!BN_VALID_ID(pMsgDigest), ippStsContextMatchErr);
IPP_BADARG_RET(BN_NEGATIVE(pMsgDigest), ippStsMessageErr);
/* make sure msg < order */
IPP_BADARG_RET(0<=cpCmp_BNU(BN_NUMBER(pMsgDigest), BN_SIZE(pMsgDigest),
DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL))), ippStsMessageErr);
/* test signature */
IPP_BAD_PTR2_RET(pSignR,pSignS);
pSignR = (IppsBigNumState*)( IPP_ALIGNED_PTR(pSignR, BN_ALIGNMENT) );
pSignS = (IppsBigNumState*)( IPP_ALIGNED_PTR(pSignS, BN_ALIGNMENT) );
IPP_BADARG_RET(!BN_VALID_ID(pSignR), ippStsContextMatchErr);
IPP_BADARG_RET(!BN_VALID_ID(pSignS), ippStsContextMatchErr);
/* test signature range */
if(0<cpBN_cmp(cpBN_OneRef(), pSignR)||
0<=cpCmp_BNU(BN_NUMBER(pSignR),BN_SIZE(pSignR), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL)))) {
*pResult = ippDLInvalidSignature;
return ippStsNoErr;
}
if(0<cpBN_cmp(cpBN_OneRef(), pSignS)||
0<=cpCmp_BNU(BN_NUMBER(pSignS),BN_SIZE(pSignS), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL)))) {
*pResult = ippDLInvalidSignature;
return ippStsNoErr;
}
{
/* allocate BN resources */
BigNumNode* pList = DLP_BNCTX(pDL);
IppsBigNumState* pW = cpBigNumListGet(&pList);
IppsBigNumState* pU1 = cpBigNumListGet(&pList);
IppsBigNumState* pU2 = cpBigNumListGet(&pList);
IppsBigNumState* pOrder = cpBigNumListGet(&pList);
ippsSet_BN(ippBigNumPOS, BITS2WORD32_SIZE(DLP_BITSIZER(pDL)), (Ipp32u*)DLP_R(pDL), pOrder);
/* W = 1/SignS (mod R) */
ippsModInv_BN((IppsBigNumState*)pSignS, pOrder, pW);
cpMontEnc_BN(pW, pW, DLP_MONTR(pDL));
/* reduce pMsgDigest if necessary */
if(0 < cpBN_cmp(pMsgDigest, pOrder))
ippsMod_BN((IppsBigNumState*)pMsgDigest, pOrder, pU1);
else
cpBN_copy(pU1, pMsgDigest);
/* U1 = (MsgDigest*W) (mod R) */
cpMontMul_BN(pU1, pW, pU1, DLP_MONTR(pDL));
/* U2 = (SignR*W) (mod R) */
cpMontMul_BN(pU2, pSignR, pW, DLP_MONTR(pDL));
/*
// V = ((G^U1)*(Y^U2) (mod P)) (mod R)
*/
/* precompute multi-exp table {1, G, Y, G*Y} */
{
cpSize pSize = BITS_BNU_CHUNK( DLP_BITSIZEP(pDL) );
BNU_CHUNK_T* pX1 = BN_NUMBER(DLP_GENC(pDL));
BNU_CHUNK_T* pX2 = BN_NUMBER(DLP_YENC(pDL));
const BNU_CHUNK_T* ppX[2];
ppX[0] = pX1;
ppX[1] = pX2;
ZEXPAND_BNU(pX1, BN_SIZE(DLP_GENC(pDL)), pSize);
ZEXPAND_BNU(pX2, BN_SIZE(DLP_YENC(pDL)), pSize);
cpMontMultiExpInitArray(DLP_METBL(pDL),
ppX, pSize*BITSIZE(BNU_CHUNK_T),
2,
DLP_MONTP0(pDL));
}
/* W = ((G^U1)*(Y^U2) (mod P) */
{
cpSize sizeE1 = BN_SIZE(pU1);
cpSize sizeE2 = BN_SIZE(pU2);
cpSize sizeE = IPP_MAX(sizeE1, sizeE2);
BNU_CHUNK_T* pE1 = BN_NUMBER(pU1);
BNU_CHUNK_T* pE2 = BN_NUMBER(pU2);
const Ipp8u* ppE[2];
ppE[0] = (Ipp8u*)pE1;
ppE[1] = (Ipp8u*)pE2;
ZEXPAND_BNU(pE1, sizeE1, sizeE);
ZEXPAND_BNU(pE2, sizeE2, sizeE);
cpFastMontMultiExp(BN_NUMBER(pW),
DLP_METBL(pDL),
ppE, sizeE*BITSIZE(BNU_CHUNK_T),
2,
DLP_MONTP0(pDL));
BN_SIZE(pW) = BITS_BNU_CHUNK( DLP_BITSIZEP(pDL) );
BN_SIGN(pW) = ippBigNumPOS;
}
cpMontDec_BN(pW, pW, DLP_MONTP0(pDL));
BN_SIZE(pW) = cpMod_BNU(BN_NUMBER(pW), BN_SIZE(pW),
BN_NUMBER(pOrder), BN_SIZE(pOrder));
/* result = W~R */
*pResult = 0==cpBN_cmp(pW, pSignR)? ippDLValid : ippDLInvalidSignature;
return ippStsNoErr;
}
}
#else
IPPFUN(IppStatus, ippsDLPVerifyDSA,(const IppsBigNumState* pMsgDigest,
const IppsBigNumState* pSignR, const IppsBigNumState* pSignS,
IppDLResult* pResult,
IppsDLPState* pDL))
{
/* test context*/
IPP_BAD_PTR2_RET(pDL,pResult);
pDL = (IppsDLPState*)( IPP_ALIGNED_PTR(pDL, DLP_ALIGNMENT) );
IPP_BADARG_RET(!DLP_VALID_ID(pDL), ippStsContextMatchErr);
/* test operation flag */
IPP_BADARG_RET(!DLP_COMPLETE(pDL), ippStsIncompleteContextErr);
/* test message representative */
IPP_BAD_PTR1_RET(pMsgDigest);
pMsgDigest = (IppsBigNumState*)( IPP_ALIGNED_PTR(pMsgDigest, BN_ALIGNMENT) );
IPP_BADARG_RET(!BN_VALID_ID(pMsgDigest), ippStsContextMatchErr);
IPP_BADARG_RET((0>cpBN_tst(pMsgDigest)), ippStsMessageErr);
/* make sure msg < order */
IPP_BADARG_RET(0<=cpCmp_BNU(BN_NUMBER(pMsgDigest), BN_SIZE(pMsgDigest),
DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL))), ippStsMessageErr);
/* test signature */
IPP_BAD_PTR2_RET(pSignR,pSignS);
pSignR = (IppsBigNumState*)( IPP_ALIGNED_PTR(pSignR, BN_ALIGNMENT) );
pSignS = (IppsBigNumState*)( IPP_ALIGNED_PTR(pSignS, BN_ALIGNMENT) );
IPP_BADARG_RET(!BN_VALID_ID(pSignR), ippStsContextMatchErr);
IPP_BADARG_RET(!BN_VALID_ID(pSignS), ippStsContextMatchErr);
/* test signature range */
if(0<cpBN_cmp(cpBN_OneRef(), pSignR)||
0<=cpCmp_BNU(BN_NUMBER(pSignR),BN_SIZE(pSignR), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL)))) {
*pResult = ippDLInvalidSignature;
return ippStsNoErr;
}
if(0<cpBN_cmp(cpBN_OneRef(), pSignS)||
0<=cpCmp_BNU(BN_NUMBER(pSignS),BN_SIZE(pSignS), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL)))) {
*pResult = ippDLInvalidSignature;
return ippStsNoErr;
}
{
/* allocate BN resources */
BigNumNode* pList = DLP_BNCTX(pDL);
IppsBigNumState* pV = cpBigNumListGet(&pList);
IppsBigNumState* pW = cpBigNumListGet(&pList);
IppsBigNumState* pU1 = cpBigNumListGet(&pList);
IppsBigNumState* pU2 = cpBigNumListGet(&pList);
IppsBigNumState* pOrder = cpBigNumListGet(&pList);
ippsSet_BN(ippBigNumPOS, BITS2WORD32_SIZE(DLP_BITSIZER(pDL)), (Ipp32u*)DLP_R(pDL), pOrder);
//int maxNumThreads = IPP_MIN(IPPCP_GET_NUM_THREADS(), 2);
/* W = 1/SignS (mod R) */
ippsModInv_BN((IppsBigNumState*)pSignS, pOrder, pW);
cpMontEnc_BN(pW, pW, DLP_MONTR(pDL));
/* reduce pMsgDigest if necessary */
if(0 < cpBN_cmp(pMsgDigest, pOrder))
ippsMod_BN((IppsBigNumState*)pMsgDigest, pOrder, pU1);
else
cpBN_copy(pU1, pMsgDigest);
/* U1 = (MsgDigest*W) (mod R) */
cpMontMul_BN(pU1, pW, pU1, DLP_MONTR(pDL));
/* U2 = (SignR*W) (mod R) */
cpMontMul_BN(pU2, pSignR, pW, DLP_MONTR(pDL));
/* V = ((G^U1)*(Y^U2) (mod P)) (mod R) */
#pragma omp parallel sections IPPCP_OMP_LIMIT_MAX_NUM_THREADS(2)
{
/* W = (G^U1) (mod P) */
#pragma omp section
{
#if !defined(_USE_WINDOW_EXP_)
//cpSafeMontExp_Binary(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL));
cpMontExpBin_BN(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL) );
#else
if((DLP_EXPMETHOD(pDL)==BINARY) || (1==cpMontExp_WinSize(BITSIZE_BNU(BN_NUMBER(pU1), BN_SIZE(pU1)))))
//cpSafeMontExp_Binary(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL));
cpMontExpBin_BN(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL) );
else
//cpSafeMontExp_Window(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL), DLP_BNUCTX0(pDL));
cpMontExpWin_BN(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL), DLP_BNUCTX0(pDL));
#endif
}
/* V = (Y^U2) (mod P) */
#pragma omp section
{
#if !defined(_USE_WINDOW_EXP_)
//cpSafeMontExp_Binary(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL));
cpMontExpBin_BN(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL) );
#else
if((DLP_EXPMETHOD(pDL)==BINARY) || (1==cpMontExp_WinSize(BITSIZE_BNU(BN_NUMBER(pU2), BN_SIZE(pU2)))))
//cpSafeMontExp_Binary(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL));
cpMontExpBin_BN(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL) );
else
//cpSafeMontExp_Window(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL), DLP_BNUCTX1(pDL));
cpMontExpWin_BN(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL), DLP_BNUCTX1(pDL));
#endif
}
}
cpMontMul_BN(pV, pW, pV, DLP_MONTP0(pDL));
cpMontDec_BN(pV, pV, DLP_MONTP0(pDL));
BN_SIZE(pV) = cpMod_BNU(BN_NUMBER(pV), BN_SIZE(pV),
BN_NUMBER(pOrder), BN_SIZE(pOrder));
/* result = V~R */
*pResult = 0==cpBN_cmp(pV, pSignR)? ippDLValid : ippDLInvalidSignature;
return ippStsNoErr;
}
}
#endif /* _OPENMP */
|
6404.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
static
void init_array (int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
// printf("Initializing Array\n");
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++)
{
A[i][j] = ((DATA_TYPE) (i + j) / nj);
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
#pragma scop
#pragma omp target teams distribute parallel for dist_schedule(static, 16) private(j)
for (i = 1; i < _PB_NI - 1; ++i)
{
for (j = 1; j < _PB_NJ - 1; ++j)
{
B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}
}
#pragma endscop
// printf("Kernal computation complete !!\n");
}
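/* The loop above applies a fixed 3x3 stencil; written as a kernel matrix
   (rows i-1, i, i+1; columns j-1, j, j+1) it reads

       +0.2  +0.5  -0.8
       -0.3  +0.6  -0.9
       +0.4  +0.7  +0.1

   and the one-cell border of B is left unwritten. */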
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
GB_binop__bget_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bget_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__bget_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__bget_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__bget_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_int8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bget_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__bget_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_int8)
// C=scalar+B GB (_bind1st__bget_int8)
// C=scalar+B' GB (_bind1st_tran__bget_int8)
// C=A+scalar GB (_bind2nd__bget_int8)
// C=A'+scalar GB (_bind2nd_tran__bget_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_BITGET (aij, bij, int8_t, 8)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITGET (x, y, int8_t, 8) ;
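// A sketch of the semantics of GB_BITGET above (assuming the MATLAB-style
// bitget convention that GraphBLAS GxB_BGET follows, with 1-based bit
// positions): z = bitget (x, y) is 1 if bit y of x is set, else 0, e.g.
// bitget (5, 1) = 1, bitget (5, 2) = 0, bitget (5, 3) = 1 since 5 = 0b101;
// the (int8_t, 8) arguments keep the bit index within an 8-bit word.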
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_INT8 || GxB_NO_BGET_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bget_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bget_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bget_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bget_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bget_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bget_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bget_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bget_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bget_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITGET (x, bij, int8_t, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bget_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_BITGET (aij, y, int8_t, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (x, aij, int8_t, 8) ; \
}
GrB_Info GB (_bind1st_tran__bget_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (aij, y, int8_t, 8) ; \
}
GrB_Info GB (_bind2nd_tran__bget_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
matrix_comp.h | #ifndef MATRIX_COMP_H_
#define MATRIX_COMP_H_
namespace acspo {
template <typename T, typename S>
matrix<bool> operator==(const matrix<T> &mat1, const matrix<S> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int rows = mat1.rows(), cols = mat1.cols(), elem = mat1.elem();
matrix<bool> ret(rows, cols);
bool *rptr = ret.ptr();
const T *m1ptr = mat1.ptr();
const S *m2ptr = mat2.ptr();
#pragma omp parallel for
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = m1ptr[i] == m2ptr[i];
}
return ret;
}
template <typename T, typename S>
matrix<bool> operator!=(const matrix<T> &mat1, const matrix<S> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int rows = mat1.rows(), cols = mat1.cols(), elem = mat1.elem();
matrix<bool> ret(rows, cols);
bool *rptr = ret.ptr();
const T *m1ptr = mat1.ptr();
const S *m2ptr = mat2.ptr();
#pragma omp parallel for
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = m1ptr[i] != m2ptr[i];
}
return ret;
}
template <typename T, typename S>
matrix<bool> operator<(const matrix<T> &mat1, const matrix<S> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int rows = mat1.rows(), cols = mat1.cols(), elem = mat1.elem();
matrix<bool> ret(rows, cols);
bool *rptr = ret.ptr();
const T *m1ptr = mat1.ptr();
const S *m2ptr = mat2.ptr();
#pragma omp parallel for
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = m1ptr[i] < m2ptr[i];
}
return ret;
}
template <typename T, typename S>
matrix<bool> operator<=(const matrix<T> &mat1, const matrix<S> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int rows = mat1.rows(), cols = mat1.cols(), elem = mat1.elem();
matrix<bool> ret(rows, cols);
bool *rptr = ret.ptr();
const T *m1ptr = mat1.ptr();
const S *m2ptr = mat2.ptr();
#pragma omp parallel for
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = m1ptr[i] <= m2ptr[i];
}
return ret;
}
template <typename T, typename S>
matrix<bool> operator>(const matrix<T> &mat1, const matrix<S> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int rows = mat1.rows(), cols = mat1.cols(), elem = mat1.elem();
matrix<bool> ret(rows, cols);
bool *rptr = ret.ptr();
const T *m1ptr = mat1.ptr();
const S *m2ptr = mat2.ptr();
#pragma omp parallel for
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = m1ptr[i] > m2ptr[i];
}
return ret;
}
template <typename T, typename S>
matrix<bool> operator>=(const matrix<T> &mat1, const matrix<S> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int rows = mat1.rows(), cols = mat1.cols(), elem = mat1.elem();
matrix<bool> ret(rows, cols);
bool *rptr = ret.ptr();
const T *m1ptr = mat1.ptr();
const S *m2ptr = mat2.ptr();
#pragma omp parallel for
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = m1ptr[i] >= m2ptr[i];
}
return ret;
}
template <typename T, typename S>
matrix<bool> operator==(const matrix<T> &mat, const S &val)
{
unsigned int rows = mat.rows(), cols = mat.cols(), elem = mat.elem();
matrix<bool> ret(rows, cols);
bool *rptr = ret.ptr();
const T *mptr = mat.ptr();
#pragma omp parallel for
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = mptr[i] == val;
}
return ret;
}
template <typename T, typename S>
matrix<bool> operator!=(const matrix<T> &mat, const S &val)
{
unsigned int rows = mat.rows(), cols = mat.cols(), elem = mat.elem();
matrix<bool> ret(rows, cols);
bool *rptr = ret.ptr();
const T *mptr = mat.ptr();
#pragma omp parallel for
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = mptr[i] != val;
}
return ret;
}
template <typename T, typename S>
matrix<bool> operator<(const matrix<T> &mat, const S &val)
{
unsigned int rows = mat.rows(), cols = mat.cols(), elem = mat.elem();
matrix<bool> ret(rows, cols);
bool *rptr = ret.ptr();
const T *mptr = mat.ptr();
#pragma omp parallel for
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = mptr[i] < val;
}
return ret;
}
template <typename T, typename S>
matrix<bool> operator<=(const matrix<T> &mat, const S &val)
{
unsigned int rows = mat.rows(), cols = mat.cols(), elem = mat.elem();
matrix<bool> ret(rows, cols);
bool *rptr = ret.ptr();
const T *mptr = mat.ptr();
#pragma omp parallel for
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = mptr[i] <= val;
}
return ret;
}
template <typename T, typename S>
matrix<bool> operator>(const matrix<T> &mat, const S &val)
{
unsigned int rows = mat.rows(), cols = mat.cols(), elem = mat.elem();
matrix<bool> ret(rows, cols);
bool *rptr = ret.ptr();
const T *mptr = mat.ptr();
#pragma omp parallel for
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = mptr[i] > val;
}
return ret;
}
template <typename T, typename S>
matrix<bool> operator>=(const matrix<T> &mat, const S &val)
{
unsigned int rows = mat.rows(), cols = mat.cols(), elem = mat.elem();
matrix<bool> ret(rows, cols);
bool *rptr = ret.ptr();
const T *mptr = mat.ptr();
#pragma omp parallel for
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = mptr[i] >= val;
}
return ret;
}
template <typename T, typename S>
matrix<bool> operator==(const S &val, const matrix<T> &mat)
{
return mat == val;
}
template <typename T, typename S>
matrix<bool> operator!=(const S &val, const matrix<T> &mat)
{
return mat != val;
}
template <typename T, typename S>
matrix<bool> operator<(const S &val, const matrix<T> &mat)
{
return mat > val;
}
template <typename T, typename S>
matrix<bool> operator<=(const S &val, const matrix<T> &mat)
{
return mat >= val;
}
template <typename T, typename S>
matrix<bool> operator>(const S &val, const matrix<T> &mat)
{
return mat < val;
}
template <typename T, typename S>
matrix<bool> operator>=(const S &val, const matrix<T> &mat)
{
return mat <= val;
}
}
#endif
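// Usage sketch (illustrative only; names below are hypothetical and assume
// the acspo::matrix<T> interface used above: rows(), cols(), elem(), ptr()):
#if 0
#include "matrix_comp.h"
void demo(const acspo::matrix<float> &a, const acspo::matrix<float> &b)
{
    acspo::matrix<bool> lt   = a < b;      // element-wise matrix/matrix compare
    acspo::matrix<bool> big  = a >= 1.0f;  // matrix/scalar compare
    acspo::matrix<bool> cold = 0.0f > b;   // scalar/matrix, forwarded as b < 0.0f
}
#endif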
|
core.c | /* Generated by Cython 0.29.24 */
/* BEGIN: Cython Metadata
{
"distutils": {
"name": "monotonic_align.core",
"sources": [
"core.pyx"
]
},
"module_name": "monotonic_align.core"
}
END: Cython Metadata */
#ifndef PY_SSIZE_T_CLEAN
#define PY_SSIZE_T_CLEAN
#endif /* PY_SSIZE_T_CLEAN */
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_24"
#define CYTHON_HEX_VERSION 0x001D18F0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
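/* The shim above backfills the PyThread_tss_* API (PEP 539, new in CPython
   3.7) on top of the legacy PyThread_*_key_value calls, so later code can
   use thread-specific storage uniformly; typical use (a sketch):
       static Py_tss_t key = Py_tss_NEEDS_INIT;
       if (!PyThread_tss_is_created(&key)) PyThread_tss_create(&key);
       PyThread_tss_set(&key, ptr);  void *p = PyThread_tss_get(&key);
*/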
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#if defined(PyUnicode_IS_READY)
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#else
#define __Pyx_PyUnicode_READY(op) (0)
#endif
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length))
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#endif
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
#endif
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
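/* Illustrative sketch: the wrappers above give one portable way to walk the
 * code points of a str on both PEP-393 (3.3+) and legacy builds.
 * demo_count_char is hypothetical, not part of this module. */
#if 0
static Py_ssize_t demo_count_char(PyObject *u, Py_UCS4 ch) {
    Py_ssize_t i, n, count = 0;
    int kind; void *data;
    if (__Pyx_PyUnicode_READY(u) < 0) return -1;  /* may lazily "ready" the string */
    n = __Pyx_PyUnicode_GET_LENGTH(u);
    kind = __Pyx_PyUnicode_KIND(u);               /* 1-, 2- or 4-byte storage */
    data = __Pyx_PyUnicode_DATA(u);
    for (i = 0; i < n; i++)
        if (__Pyx_PyUnicode_READ(kind, data, i) == ch) count++;
    return count;
}
#endif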
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#ifndef PyObject_Unicode
#define PyObject_Unicode PyObject_Str
#endif
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
#define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
#else
#define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
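/* Sketch: __PYX_NAN() is a quiet NaN, either the C99 NAN macro or a float
 * whose bytes are all 0xFF (a negative quiet NaN on IEEE-754 targets). NaN
 * is the only value that compares unequal to itself: */
#if 0
static int demo_is_nan(float x) { return x != x; }  /* demo_is_nan(__PYX_NAN()) == 1 */
#endif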
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_MARK_ERR_POS(f_index, lineno) \
{ __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__monotonic_align__core
#define __PYX_HAVE_API__monotonic_align__core
/* Early includes */
#include "pythread.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
return (size_t) i < (size_t) limit;
}
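/* Sketch: the (size_t) casts fold the two range checks (i >= 0 && i < limit)
 * into a single unsigned comparison, since a negative i wraps to a huge
 * size_t. A hypothetical bounds-checked read built on it: */
#if 0
static double demo_checked_get(const double *buf, Py_ssize_t n, Py_ssize_t i) {
    return __Pyx_is_valid_index(i, n) ? buf[i] : 0.0;  /* 0.0 as an error sentinel */
}
#endif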
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;  /* scan one past the terminating NUL */
return (size_t)(u_end - u - 1);  /* subtract the overshoot */
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"core.pyx",
"stringsource",
};
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
typedef struct {
struct __pyx_memoryview_obj *memview;
char *data;
Py_ssize_t shape[8];
Py_ssize_t strides[8];
Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m) (m.shape[0])
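/* Sketch: a __Pyx_memviewslice addresses element (i, j) of a 2-D view as
 * data + i*strides[0] + j*strides[1]; a suboffset >= 0 would mark an indirect
 * dimension needing an extra dereference. Hypothetical reader for a direct
 * float[:, :] slice: */
#if 0
static float demo_get2d(const __Pyx_memviewslice *s, Py_ssize_t i, Py_ssize_t j) {
    return *(float *)(s->data + i * s->strides[0] + j * s->strides[1]);
}
#endif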
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
(__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) &&\
!defined(__i386__)
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
#include <Windows.h>
#undef __pyx_atomic_int_type
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#pragma message ("Using MSVC atomics")
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview)\
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview)\
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
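/* Sketch: with GNU atomics available, a memoryview's acquisition count is
 * bumped lock-free; otherwise the *_locked fallbacks serialize on the
 * memview's lock. The lock-free form reduces to the GCC builtins
 * (demo_* names are hypothetical): */
#if 0
static __pyx_atomic_int demo_count = 0;
static void demo_acquire(void) { (void)__sync_fetch_and_add(&demo_count, 1); }
static void demo_release(void) { (void)__sync_fetch_and_sub(&demo_count, 1); }
#endif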
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each;
/* "monotonic_align/core.pyx":7
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<<
* cdef int x
* cdef int y
*/
struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each {
int __pyx_n;
float max_neg_val;
};
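/* Sketch: Cython routes optional C arguments through this struct; __pyx_n
 * says how many optional values the caller actually filled in. Overriding
 * max_neg_val for maximum_path_each (whose prototype appears later in this
 * file) therefore looks roughly like the hypothetical call below: */
#if 0
static void demo_call_with_override(__Pyx_memviewslice path, __Pyx_memviewslice value,
                                    int t_y, int t_x) {
    struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each opt;
    opt.__pyx_n = 1;          /* one optional argument supplied */
    opt.max_neg_val = -1e9f;
    __pyx_f_15monotonic_align_4core_maximum_path_each(path, value, t_y, t_x, &opt);
}
#endif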
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_array_obj {
PyObject_HEAD
struct __pyx_vtabstruct_array *__pyx_vtab;
char *data;
Py_ssize_t len;
char *format;
int ndim;
Py_ssize_t *_shape;
Py_ssize_t *_strides;
Py_ssize_t itemsize;
PyObject *mode;
PyObject *_format;
void (*callback_free_data)(void *);
int free_data;
int dtype_is_object;
};
/* "View.MemoryView":279
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
struct __pyx_MemviewEnum_obj {
PyObject_HEAD
PyObject *name;
};
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_memoryview_obj {
PyObject_HEAD
struct __pyx_vtabstruct_memoryview *__pyx_vtab;
PyObject *obj;
PyObject *_size;
PyObject *_array_interface;
PyThread_type_lock lock;
__pyx_atomic_int acquisition_count[2];
__pyx_atomic_int *acquisition_count_aligned_p;
Py_buffer view;
int flags;
int dtype_is_object;
__Pyx_TypeInfo *typeinfo;
};
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_memoryviewslice_obj {
struct __pyx_memoryview_obj __pyx_base;
__Pyx_memviewslice from_slice;
PyObject *from_object;
PyObject *(*to_object_func)(char *);
int (*to_dtype_func)(char *, PyObject *);
};
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_vtabstruct_array {
PyObject *(*get_memview)(struct __pyx_array_obj *);
};
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_vtabstruct_memoryview {
char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_vtabstruct__memoryviewslice {
struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
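/* Sketch: __Pyx_XDECREF_SET stores the new value before dropping the old
 * reference, so a destructor fired by the DECREF can never observe (or
 * re-enter through) a half-updated slot. Hypothetical field swap: */
#if 0
static void demo_replace(PyObject **slot, PyObject *new_ref) {  /* steals new_ref */
    __Pyx_XDECREF_SET(*slot, new_ref);   /* *slot may start out NULL */
}
#endif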
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* MemviewSliceInit.proto */
#define __Pyx_BUF_MAX_NDIMS 8 /* matches the 8-dimension shape/strides arrays of __Pyx_memviewslice */
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
(sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
#define __Pxy_PyFrame_Initialize_Offsets()\
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame)\
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* IncludeStringH.proto */
#include <string.h>
/* BytesEquals.proto */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
/* UnicodeEquals.proto */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
/* StrEquals.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
/* None.proto */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
/* UnaryNegOverflows.proto */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
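/* Sketch: negating the most negative two's-complement value is not
 * representable, and that is the one case where x < 0 yet (unsigned long)x
 * equals its own unsigned negation: */
#if 0
#include <assert.h>
#include <limits.h>
static void demo_neg_overflow(void) {
    assert(UNARY_NEG_WOULD_OVERFLOW(LONG_MIN));  /* -LONG_MIN overflows */
    assert(!UNARY_NEG_WOULD_OVERFLOW(-1L));
    assert(!UNARY_NEG_WOULD_OVERFLOW(1L));
}
#endif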
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
/* decode_c_string_utf16.proto */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 0;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = -1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
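/* Sketch: PyUnicode_DecodeUTF16's byteorder argument selects the endianness
 * (-1 little, +1 big, 0 native with BOM autodetection), which is all the
 * three wrappers above differ in. Hypothetical 4-byte UTF-16LE buffer: */
#if 0
static PyObject *demo_decode_hi(void) {
    static const char buf[] = { 'h', 0, 'i', 0 };  /* "hi" in UTF-16LE */
    return __Pyx_PyUnicode_DecodeUTF16LE(buf, 4, NULL);
}
#endif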
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetAttr3.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
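/* Sketch: on CPython builds with dict version tags, the lookup macro above
 * caches (value, ma_version_tag) in function-local statics and only re-runs
 * LOOKUP once the dict's tag has moved, i.e. after a mutation. Hypothetical
 * cached fetch returning a borrowed reference: */
#if 0
static PyObject *demo_cached_lookup(PyObject *d, PyObject *key) {
    PyObject *result;
    __PYX_PY_DICT_LOOKUP_IF_MODIFIED(result, d, PyDict_GetItem(d, key))
    return result;   /* NULL if the key is absent */
}
#endif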
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* ListCompAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len)) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
__Pyx_SET_SIZE(list, len + 1);
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* ListExtend.proto */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
PyObject* none = _PyList_Extend((PyListObject*)L, v);
if (unlikely(!none))
return -1;
Py_DECREF(none);
return 0;
#else
return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
/* ListAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {  /* bitwise & merges both tests into one branch */
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
__Pyx_SET_SIZE(list, len + 1);
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
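/* Sketch: the fast path of __Pyx_PyList_Append writes into the list's spare
 * capacity directly (PyList_SET_ITEM plus a size bump); the extra
 * "len > allocated/2" test restricts it to lists that are at least half
 * full, leaving badly over-allocated lists to PyList_Append and its resize
 * logic. Hypothetical fill loop: */
#if 0
static int demo_fill(PyObject *list, PyObject *item, Py_ssize_t n) {
    Py_ssize_t i;
    for (i = 0; i < n; i++)
        if (__Pyx_PyList_Append(list, item) < 0) return -1;  /* INCREFs item */
    return 0;
}
#endif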
/* None.proto */
static CYTHON_INLINE long __Pyx_div_long(long, long);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* HasAttr.proto */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* SetVTable.proto */
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
/* PyObjectGetAttrStrNoError.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
/* MemviewSliceIsContig.proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
/* OverlappingSlices.proto */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
/* Capsule.proto */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* TypeInfoCompare.proto */
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
/* MemviewSliceValidateAndInit.proto */
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag);
/* GCCDiagnostics.proto */
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
#define __Pyx_HAS_GCC_DIAGNOSTIC
#endif
/* MemviewSliceCopyTemplate.proto */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'cython.view' */
/* Module declarations from 'cython' */
/* Module declarations from 'monotonic_align.core' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static int __pyx_memoryview_thread_locks_used;
static PyThread_type_lock __pyx_memoryview_thread_locks[8];
static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice, __Pyx_memviewslice, int, int, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args); /*proto*/
static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 'U' : 'I', IS_UNSIGNED(int), 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 };
#define __Pyx_MODULE_NAME "monotonic_align.core"
extern int __pyx_module_is_main_monotonic_align__core;
int __pyx_module_is_main_monotonic_align__core = 0;
/* Implementation of 'monotonic_align.core' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
static const char __pyx_k_O[] = "O";
static const char __pyx_k_c[] = "c";
static const char __pyx_k_id[] = "id";
static const char __pyx_k_new[] = "__new__";
static const char __pyx_k_obj[] = "obj";
static const char __pyx_k_base[] = "base";
static const char __pyx_k_dict[] = "__dict__";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mode[] = "mode";
static const char __pyx_k_name[] = "name";
static const char __pyx_k_ndim[] = "ndim";
static const char __pyx_k_pack[] = "pack";
static const char __pyx_k_size[] = "size";
static const char __pyx_k_step[] = "step";
static const char __pyx_k_stop[] = "stop";
static const char __pyx_k_t_xs[] = "t_xs";
static const char __pyx_k_t_ys[] = "t_ys";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_ASCII[] = "ASCII";
static const char __pyx_k_class[] = "__class__";
static const char __pyx_k_error[] = "error";
static const char __pyx_k_flags[] = "flags";
static const char __pyx_k_paths[] = "paths";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_start[] = "start";
static const char __pyx_k_encode[] = "encode";
static const char __pyx_k_format[] = "format";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_name_2[] = "__name__";
static const char __pyx_k_pickle[] = "pickle";
static const char __pyx_k_reduce[] = "__reduce__";
static const char __pyx_k_struct[] = "struct";
static const char __pyx_k_unpack[] = "unpack";
static const char __pyx_k_update[] = "update";
static const char __pyx_k_values[] = "values";
static const char __pyx_k_fortran[] = "fortran";
static const char __pyx_k_memview[] = "memview";
static const char __pyx_k_Ellipsis[] = "Ellipsis";
static const char __pyx_k_getstate[] = "__getstate__";
static const char __pyx_k_itemsize[] = "itemsize";
static const char __pyx_k_pyx_type[] = "__pyx_type";
static const char __pyx_k_setstate[] = "__setstate__";
static const char __pyx_k_TypeError[] = "TypeError";
static const char __pyx_k_enumerate[] = "enumerate";
static const char __pyx_k_pyx_state[] = "__pyx_state";
static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
static const char __pyx_k_IndexError[] = "IndexError";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_pyx_result[] = "__pyx_result";
static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static const char __pyx_k_MemoryError[] = "MemoryError";
static const char __pyx_k_PickleError[] = "PickleError";
static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
static const char __pyx_k_stringsource[] = "stringsource";
static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
static const char __pyx_k_View_MemoryView[] = "View.MemoryView";
static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview";
static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview";
static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))";
static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
static PyObject *__pyx_n_s_ASCII;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor;
static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_PickleError;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_View_MemoryView;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_dict;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_n_u_fortran;
static PyObject *__pyx_n_s_getstate;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_new;
static PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_paths;
static PyObject *__pyx_n_s_pickle;
static PyObject *__pyx_n_s_pyx_PickleError;
static PyObject *__pyx_n_s_pyx_checksum;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_result;
static PyObject *__pyx_n_s_pyx_state;
static PyObject *__pyx_n_s_pyx_type;
static PyObject *__pyx_n_s_pyx_unpickle_Enum;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_reduce;
static PyObject *__pyx_n_s_reduce_cython;
static PyObject *__pyx_n_s_reduce_ex;
static PyObject *__pyx_n_s_setstate;
static PyObject *__pyx_n_s_setstate_cython;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_kp_s_stringsource;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_t_xs;
static PyObject *__pyx_n_s_t_ys;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_update;
static PyObject *__pyx_n_s_values;
static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_184977713;
static PyObject *__pyx_int_neg_1;
static float __pyx_k_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__16;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__15;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__20;
static PyObject *__pyx_tuple__21;
static PyObject *__pyx_tuple__22;
static PyObject *__pyx_tuple__23;
static PyObject *__pyx_tuple__24;
static PyObject *__pyx_tuple__25;
static PyObject *__pyx_codeobj__26;
/* Late includes */
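/* --------------------------------------------------------------------------
 * The generated code below implements monotonic alignment search: a forward
 * dynamic-programming pass over value[y, x] followed by a backward pass that
 * writes the argmax path into path[y, x] (see the quoted core.pyx source in
 * the comments). maximum_path_c then dispatches one such search per batch
 * element, in parallel when OpenMP is available.
 * ------------------------------------------------------------------------ */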
/* "monotonic_align/core.pyx":7
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<<
* cdef int x
* cdef int y
*/
static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice __pyx_v_path, __Pyx_memviewslice __pyx_v_value, int __pyx_v_t_y, int __pyx_v_t_x, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args) {
float __pyx_v_max_neg_val = __pyx_k_;
int __pyx_v_x;
int __pyx_v_y;
float __pyx_v_v_prev;
float __pyx_v_v_cur;
int __pyx_v_index;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
long __pyx_t_4;
int __pyx_t_5;
long __pyx_t_6;
long __pyx_t_7;
int __pyx_t_8;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
float __pyx_t_11;
float __pyx_t_12;
float __pyx_t_13;
int __pyx_t_14;
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
if (__pyx_optional_args) {
if (__pyx_optional_args->__pyx_n > 0) {
__pyx_v_max_neg_val = __pyx_optional_args->max_neg_val;
}
}
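  /* The block above materializes the Cython default argument
   * max_neg_val=-1e9 (held in the module constant __pyx_k_) when the caller
   * did not pass an explicit value. */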
/* "monotonic_align/core.pyx":13
* cdef float v_cur
* cdef float tmp
* cdef int index = t_x - 1 # <<<<<<<<<<<<<<
*
* for y in range(t_y):
*/
__pyx_v_index = (__pyx_v_t_x - 1);
/* "monotonic_align/core.pyx":15
* cdef int index = t_x - 1
*
* for y in range(t_y): # <<<<<<<<<<<<<<
* for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
* if x == y:
*/
__pyx_t_1 = __pyx_v_t_y;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_y = __pyx_t_3;
/* "monotonic_align/core.pyx":16
*
* for y in range(t_y):
* for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): # <<<<<<<<<<<<<<
* if x == y:
* v_cur = max_neg_val
*/
__pyx_t_4 = (__pyx_v_y + 1);
__pyx_t_5 = __pyx_v_t_x;
if (((__pyx_t_4 < __pyx_t_5) != 0)) {
__pyx_t_6 = __pyx_t_4;
} else {
__pyx_t_6 = __pyx_t_5;
}
__pyx_t_4 = __pyx_t_6;
__pyx_t_5 = ((__pyx_v_t_x + __pyx_v_y) - __pyx_v_t_y);
__pyx_t_6 = 0;
if (((__pyx_t_5 > __pyx_t_6) != 0)) {
__pyx_t_7 = __pyx_t_5;
} else {
__pyx_t_7 = __pyx_t_6;
}
__pyx_t_6 = __pyx_t_4;
for (__pyx_t_5 = __pyx_t_7; __pyx_t_5 < __pyx_t_6; __pyx_t_5+=1) {
__pyx_v_x = __pyx_t_5;
/* "monotonic_align/core.pyx":17
* for y in range(t_y):
* for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
* if x == y: # <<<<<<<<<<<<<<
* v_cur = max_neg_val
* else:
*/
__pyx_t_8 = ((__pyx_v_x == __pyx_v_y) != 0);
if (__pyx_t_8) {
/* "monotonic_align/core.pyx":18
* for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
* if x == y:
* v_cur = max_neg_val # <<<<<<<<<<<<<<
* else:
* v_cur = value[y-1, x]
*/
__pyx_v_v_cur = __pyx_v_max_neg_val;
/* "monotonic_align/core.pyx":17
* for y in range(t_y):
* for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
* if x == y: # <<<<<<<<<<<<<<
* v_cur = max_neg_val
* else:
*/
goto __pyx_L7;
}
/* "monotonic_align/core.pyx":20
* v_cur = max_neg_val
* else:
* v_cur = value[y-1, x] # <<<<<<<<<<<<<<
* if x == 0:
* if y == 0:
*/
/*else*/ {
__pyx_t_9 = (__pyx_v_y - 1);
__pyx_t_10 = __pyx_v_x;
__pyx_v_v_cur = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) )));
}
__pyx_L7:;
/* "monotonic_align/core.pyx":21
* else:
* v_cur = value[y-1, x]
* if x == 0: # <<<<<<<<<<<<<<
* if y == 0:
* v_prev = 0.
*/
__pyx_t_8 = ((__pyx_v_x == 0) != 0);
if (__pyx_t_8) {
/* "monotonic_align/core.pyx":22
* v_cur = value[y-1, x]
* if x == 0:
* if y == 0: # <<<<<<<<<<<<<<
* v_prev = 0.
* else:
*/
__pyx_t_8 = ((__pyx_v_y == 0) != 0);
if (__pyx_t_8) {
/* "monotonic_align/core.pyx":23
* if x == 0:
* if y == 0:
* v_prev = 0. # <<<<<<<<<<<<<<
* else:
* v_prev = max_neg_val
*/
__pyx_v_v_prev = 0.;
/* "monotonic_align/core.pyx":22
* v_cur = value[y-1, x]
* if x == 0:
* if y == 0: # <<<<<<<<<<<<<<
* v_prev = 0.
* else:
*/
goto __pyx_L9;
}
/* "monotonic_align/core.pyx":25
* v_prev = 0.
* else:
* v_prev = max_neg_val # <<<<<<<<<<<<<<
* else:
* v_prev = value[y-1, x-1]
*/
/*else*/ {
__pyx_v_v_prev = __pyx_v_max_neg_val;
}
__pyx_L9:;
/* "monotonic_align/core.pyx":21
* else:
* v_cur = value[y-1, x]
* if x == 0: # <<<<<<<<<<<<<<
* if y == 0:
* v_prev = 0.
*/
goto __pyx_L8;
}
/* "monotonic_align/core.pyx":27
* v_prev = max_neg_val
* else:
* v_prev = value[y-1, x-1] # <<<<<<<<<<<<<<
* value[y, x] += max(v_prev, v_cur)
*
*/
/*else*/ {
__pyx_t_10 = (__pyx_v_y - 1);
__pyx_t_9 = (__pyx_v_x - 1);
__pyx_v_v_prev = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_10 * __pyx_v_value.strides[0]) )) + __pyx_t_9)) )));
}
__pyx_L8:;
/* "monotonic_align/core.pyx":28
* else:
* v_prev = value[y-1, x-1]
* value[y, x] += max(v_prev, v_cur) # <<<<<<<<<<<<<<
*
* for y in range(t_y - 1, -1, -1):
*/
__pyx_t_11 = __pyx_v_v_cur;
__pyx_t_12 = __pyx_v_v_prev;
if (((__pyx_t_11 > __pyx_t_12) != 0)) {
__pyx_t_13 = __pyx_t_11;
} else {
__pyx_t_13 = __pyx_t_12;
}
__pyx_t_9 = __pyx_v_y;
__pyx_t_10 = __pyx_v_x;
*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) )) += __pyx_t_13;
}
}
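  /* Forward pass complete: for each admissible (y, x) the loops above applied
   * the recurrence value[y, x] += max(value[y-1, x-1], value[y-1, x]), with
   * off-grid predecessors replaced by max_neg_val (and by 0 at the origin),
   * exactly as in the quoted core.pyx lines 15-28. */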
/* "monotonic_align/core.pyx":30
* value[y, x] += max(v_prev, v_cur)
*
* for y in range(t_y - 1, -1, -1): # <<<<<<<<<<<<<<
* path[y, index] = 1
* if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
*/
for (__pyx_t_1 = (__pyx_v_t_y - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_y = __pyx_t_1;
/* "monotonic_align/core.pyx":31
*
* for y in range(t_y - 1, -1, -1):
* path[y, index] = 1 # <<<<<<<<<<<<<<
* if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
* index = index - 1
*/
__pyx_t_10 = __pyx_v_y;
__pyx_t_9 = __pyx_v_index;
*((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_path.data + __pyx_t_10 * __pyx_v_path.strides[0]) )) + __pyx_t_9)) )) = 1;
/* "monotonic_align/core.pyx":32
* for y in range(t_y - 1, -1, -1):
* path[y, index] = 1
* if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<<
* index = index - 1
*
*/
__pyx_t_14 = ((__pyx_v_index != 0) != 0);
if (__pyx_t_14) {
} else {
__pyx_t_8 = __pyx_t_14;
goto __pyx_L13_bool_binop_done;
}
__pyx_t_14 = ((__pyx_v_index == __pyx_v_y) != 0);
if (!__pyx_t_14) {
} else {
__pyx_t_8 = __pyx_t_14;
goto __pyx_L13_bool_binop_done;
}
__pyx_t_9 = (__pyx_v_y - 1);
__pyx_t_10 = __pyx_v_index;
__pyx_t_15 = (__pyx_v_y - 1);
__pyx_t_16 = (__pyx_v_index - 1);
__pyx_t_14 = (((*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))) < (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_15 * __pyx_v_value.strides[0]) )) + __pyx_t_16)) )))) != 0);
__pyx_t_8 = __pyx_t_14;
__pyx_L13_bool_binop_done:;
if (__pyx_t_8) {
/* "monotonic_align/core.pyx":33
* path[y, index] = 1
* if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
* index = index - 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_index = (__pyx_v_index - 1);
/* "monotonic_align/core.pyx":32
* for y in range(t_y - 1, -1, -1):
* path[y, index] = 1
* if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<<
* index = index - 1
*
*/
}
}
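  /* Backward pass complete: starting from index = t_x - 1 on the last row,
   * each iteration marked path[y, index] = 1 and stepped the column left
   * whenever the diagonal predecessor value[y-1, index-1] beat the vertical
   * one, or the boundary index == y forced the diagonal move. */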
/* "monotonic_align/core.pyx":7
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<<
* cdef int x
* cdef int y
*/
/* function exit code */
}
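/* --------------------------------------------------------------------------
 * Illustrative sketch only (not part of the generated module, hence the
 * #if 0 guard): the same monotonic alignment DP as maximum_path_each above,
 * written as plain C over a row-major t_y x t_x layout. The helper name
 * mas_path_sketch is hypothetical.
 * ------------------------------------------------------------------------ */
#if 0
static void mas_path_sketch(int *path, float *value,
                            int t_y, int t_x, float max_neg_val)
{
    /* Forward pass: accumulate the best monotonic score into value. */
    for (int y = 0; y < t_y; y++) {
        int x_lo = t_x + y - t_y; if (x_lo < 0)   x_lo = 0;    /* max(0, ...) */
        int x_hi = y + 1;         if (x_hi > t_x) x_hi = t_x;  /* min(t_x, ...) */
        for (int x = x_lo; x < x_hi; x++) {
            float v_cur  = (x == y) ? max_neg_val : value[(y - 1) * t_x + x];
            float v_prev = (x == 0) ? ((y == 0) ? 0.f : max_neg_val)
                                    : value[(y - 1) * t_x + (x - 1)];
            value[y * t_x + x] += (v_prev > v_cur) ? v_prev : v_cur;
        }
    }
    /* Backward pass: trace the argmax path from the last row down. */
    int index = t_x - 1;
    for (int y = t_y - 1; y >= 0; y--) {
        path[y * t_x + index] = 1;
        if (index != 0 && (index == y ||
            value[(y - 1) * t_x + index] < value[(y - 1) * t_x + (index - 1)]))
            index = index - 1;
    }
}
#endif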
/* "monotonic_align/core.pyx":38
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<<
* cdef int b = paths.shape[0]
* cdef int i
*/
static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs, CYTHON_UNUSED int __pyx_skip_dispatch) {
CYTHON_UNUSED int __pyx_v_b;
int __pyx_v_i;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
__Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } };
Py_ssize_t __pyx_t_6;
Py_ssize_t __pyx_t_7;
/* "monotonic_align/core.pyx":39
* @cython.wraparound(False)
* cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil:
* cdef int b = paths.shape[0] # <<<<<<<<<<<<<<
* cdef int i
* for i in prange(b, nogil=True):
*/
__pyx_v_b = (__pyx_v_paths.shape[0]);
/* "monotonic_align/core.pyx":41
* cdef int b = paths.shape[0]
* cdef int i
* for i in prange(b, nogil=True): # <<<<<<<<<<<<<<
* maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i])
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_1 = __pyx_v_b;
if ((1 == 0)) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_3 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_6, __pyx_t_7) firstprivate(__pyx_t_4, __pyx_t_5)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i)
#endif /* _OPENMP */
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){
{
__pyx_v_i = (int)(0 + 1 * __pyx_t_2);
/* "monotonic_align/core.pyx":42
* cdef int i
* for i in prange(b, nogil=True):
* maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) # <<<<<<<<<<<<<<
*/
__pyx_t_4.data = __pyx_v_paths.data;
__pyx_t_4.memview = __pyx_v_paths.memview;
__PYX_INC_MEMVIEW(&__pyx_t_4, 0);
{
Py_ssize_t __pyx_tmp_idx = __pyx_v_i;
Py_ssize_t __pyx_tmp_stride = __pyx_v_paths.strides[0];
__pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride;
}
__pyx_t_4.shape[0] = __pyx_v_paths.shape[1];
__pyx_t_4.strides[0] = __pyx_v_paths.strides[1];
__pyx_t_4.suboffsets[0] = -1;
__pyx_t_4.shape[1] = __pyx_v_paths.shape[2];
__pyx_t_4.strides[1] = __pyx_v_paths.strides[2];
__pyx_t_4.suboffsets[1] = -1;
__pyx_t_5.data = __pyx_v_values.data;
__pyx_t_5.memview = __pyx_v_values.memview;
__PYX_INC_MEMVIEW(&__pyx_t_5, 0);
{
Py_ssize_t __pyx_tmp_idx = __pyx_v_i;
Py_ssize_t __pyx_tmp_stride = __pyx_v_values.strides[0];
__pyx_t_5.data += __pyx_tmp_idx * __pyx_tmp_stride;
}
__pyx_t_5.shape[0] = __pyx_v_values.shape[1];
__pyx_t_5.strides[0] = __pyx_v_values.strides[1];
__pyx_t_5.suboffsets[0] = -1;
__pyx_t_5.shape[1] = __pyx_v_values.shape[2];
__pyx_t_5.strides[1] = __pyx_v_values.strides[2];
__pyx_t_5.suboffsets[1] = -1;
__pyx_t_6 = __pyx_v_i;
__pyx_t_7 = __pyx_v_i;
__pyx_f_15monotonic_align_4core_maximum_path_each(__pyx_t_4, __pyx_t_5, (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_ys.data) + __pyx_t_6)) ))), (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_xs.data) + __pyx_t_7)) ))), NULL);
__PYX_XDEC_MEMVIEW(&__pyx_t_4, 0);
__pyx_t_4.memview = NULL;
__pyx_t_4.data = NULL;
__PYX_XDEC_MEMVIEW(&__pyx_t_5, 0);
__pyx_t_5.memview = NULL;
__pyx_t_5.data = NULL;
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "monotonic_align/core.pyx":41
* cdef int b = paths.shape[0]
* cdef int i
* for i in prange(b, nogil=True): # <<<<<<<<<<<<<<
* maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i])
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "monotonic_align/core.pyx":38
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<<
* cdef int b = paths.shape[0]
* cdef int i
*/
/* function exit code */
}
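/* The prange(b, nogil=True) loop above releases the GIL and, when compiled
 * with OpenMP (_OPENMP defined), lowers to the "#pragma omp parallel" /
 * "#pragma omp for" pair seen above, so each batch element i runs
 * maximum_path_each on an independent 2-D slice of paths/values. Without
 * OpenMP the same loop simply runs serially. */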
/* Python wrapper */
static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_paths = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_values = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_t_ys = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_t_xs = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("maximum_path_c (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_paths,&__pyx_n_s_values,&__pyx_n_s_t_ys,&__pyx_n_s_t_xs,0};
PyObject* values[4] = {0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_paths)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_values)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 1); __PYX_ERR(0, 38, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_ys)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 2); __PYX_ERR(0, 38, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_xs)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 3); __PYX_ERR(0, 38, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "maximum_path_c") < 0)) __PYX_ERR(0, 38, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 4) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
}
__pyx_v_paths = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_paths.memview)) __PYX_ERR(0, 38, __pyx_L3_error)
__pyx_v_values = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_values.memview)) __PYX_ERR(0, 38, __pyx_L3_error)
__pyx_v_t_ys = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_ys.memview)) __PYX_ERR(0, 38, __pyx_L3_error)
__pyx_v_t_xs = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_xs.memview)) __PYX_ERR(0, 38, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 38, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_15monotonic_align_4core_maximum_path_c(__pyx_self, __pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("maximum_path_c", 0);
__Pyx_XDECREF(__pyx_r);
if (unlikely(!__pyx_v_paths.memview)) { __Pyx_RaiseUnboundLocalError("paths"); __PYX_ERR(0, 38, __pyx_L1_error) }
if (unlikely(!__pyx_v_values.memview)) { __Pyx_RaiseUnboundLocalError("values"); __PYX_ERR(0, 38, __pyx_L1_error) }
if (unlikely(!__pyx_v_t_ys.memview)) { __Pyx_RaiseUnboundLocalError("t_ys"); __PYX_ERR(0, 38, __pyx_L1_error) }
if (unlikely(!__pyx_v_t_xs.memview)) { __Pyx_RaiseUnboundLocalError("t_xs"); __PYX_ERR(0, 38, __pyx_L1_error) }
__pyx_t_1 = __Pyx_void_to_None(__pyx_f_15monotonic_align_4core_maximum_path_c(__pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs, 0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 38, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_paths, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_values, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_t_ys, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_t_xs, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
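/* Python-side usage (hedged sketch; shapes inferred from the quoted core.pyx
 * signature, with int32/float32 on typical platforms):
 *
 *   from monotonic_align.core import maximum_path_c
 *   # paths: int32[b, t_y, t_x] (written in place), values: float32[b, t_y, t_x],
 *   # t_ys: int32[b], t_xs: int32[b] -- all C-contiguous and writable, since
 *   # the wrapper above requests every buffer with PyBUF_WRITABLE.
 *   maximum_path_c(paths, values, t_ys, t_xs)
 */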
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* Python wrapper */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_shape = 0;
Py_ssize_t __pyx_v_itemsize;
PyObject *__pyx_v_format = 0;
PyObject *__pyx_v_mode = 0;
int __pyx_v_allocate_buffer;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
PyObject* values[5] = {0,0,0,0,0};
values[3] = ((PyObject *)__pyx_n_s_c);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode);
if (value) { values[3] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 4:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer);
if (value) { values[4] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_shape = ((PyObject*)values[0]);
__pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_v_format = values[2];
__pyx_v_mode = values[3];
if (values[4]) {
__pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error)
} else {
/* "View.MemoryView":123
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
* mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
*
* cdef int idx
*/
__pyx_v_allocate_buffer = ((int)1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error)
if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error)
}
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
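/* The wrapper above is standard Cython argument unpacking for
 * array.__cinit__(shape, itemsize, format, mode="c", allocate_buffer=True):
 * positional and keyword arguments are normalized into values[0..4], the two
 * defaults are filled in, and shape is type-checked as a tuple (with format
 * not None) before the real __cinit__ body below runs. */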
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
int __pyx_v_idx;
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_dim;
PyObject **__pyx_v_p;
char __pyx_v_order;
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
int __pyx_t_8;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
Py_ssize_t __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
__Pyx_INCREF(__pyx_v_format);
/* "View.MemoryView":129
* cdef PyObject **p
*
* self.ndim = <int> len(shape) # <<<<<<<<<<<<<<
* self.itemsize = itemsize
*
*/
if (unlikely(__pyx_v_shape == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 129, __pyx_L1_error)
}
__pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error)
__pyx_v_self->ndim = ((int)__pyx_t_1);
/* "View.MemoryView":130
*
* self.ndim = <int> len(shape)
* self.itemsize = itemsize # <<<<<<<<<<<<<<
*
* if not self.ndim:
*/
__pyx_v_self->itemsize = __pyx_v_itemsize;
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
__pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 133, __pyx_L1_error)
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
}
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
__pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 136, __pyx_L1_error)
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
}
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
__pyx_t_2 = PyBytes_Check(__pyx_v_format);
__pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":139
*
* if not isinstance(format, bytes):
* format = format.encode('ASCII') # <<<<<<<<<<<<<<
* self._format = format # keep a reference to the byte string
* self.format = self._format
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII);
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
}
/* "View.MemoryView":140
* if not isinstance(format, bytes):
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
* self.format = self._format
*
*/
if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error)
__pyx_t_3 = __pyx_v_format;
__Pyx_INCREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__Pyx_GOTREF(__pyx_v_self->_format);
__Pyx_DECREF(__pyx_v_self->_format);
__pyx_v_self->_format = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":141
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
* self.format = self._format # <<<<<<<<<<<<<<
*
*
*/
if (unlikely(__pyx_v_self->_format == Py_None)) {
PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
__PYX_ERR(1, 141, __pyx_L1_error)
}
__pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error)
__pyx_v_self->format = __pyx_t_7;
/* "View.MemoryView":144
*
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
* self._strides = self._shape + self.ndim
*
*/
__pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
/* "View.MemoryView":145
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
* self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
*
* if not self._shape:
*/
__pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 148, __pyx_L1_error)
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
}
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
__pyx_t_8 = 0;
__pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
for (;;) {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_9;
__pyx_v_idx = __pyx_t_8;
__pyx_t_8 = (__pyx_t_8 + 1);
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
__pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":153
* for idx, dim in enumerate(shape):
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<<
* self._shape[idx] = dim
*
*/
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6);
__pyx_t_5 = 0;
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 153, __pyx_L1_error)
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
}
/* "View.MemoryView":154
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim # <<<<<<<<<<<<<<
*
* cdef char order
*/
(__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
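  /* The enumerate(shape) loop above validated every dimension (dim > 0) and
   * copied the shape tuple into the freshly allocated self._shape array. */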
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":158
* cdef char order
* if mode == 'fortran':
* order = b'F' # <<<<<<<<<<<<<<
* self.mode = u'fortran'
* elif mode == 'c':
*/
__pyx_v_order = 'F';
/* "View.MemoryView":159
* if mode == 'fortran':
* order = b'F'
* self.mode = u'fortran' # <<<<<<<<<<<<<<
* elif mode == 'c':
* order = b'C'
*/
__Pyx_INCREF(__pyx_n_u_fortran);
__Pyx_GIVEREF(__pyx_n_u_fortran);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_fortran;
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
goto __pyx_L10;
}
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error)
if (likely(__pyx_t_4)) {
/* "View.MemoryView":161
* self.mode = u'fortran'
* elif mode == 'c':
* order = b'C' # <<<<<<<<<<<<<<
* self.mode = u'c'
* else:
*/
__pyx_v_order = 'C';
/* "View.MemoryView":162
* elif mode == 'c':
* order = b'C'
* self.mode = u'c' # <<<<<<<<<<<<<<
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*/
__Pyx_INCREF(__pyx_n_u_c);
__Pyx_GIVEREF(__pyx_n_u_c);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_c;
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
goto __pyx_L10;
}
/* "View.MemoryView":164
* self.mode = u'c'
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<<
*
* self.len = fill_contig_strides_array(self._shape, self._strides,
*/
/*else*/ {
__pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 164, __pyx_L1_error)
}
__pyx_L10:;
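  /* Mode dispatch done: order is 'F' or 'C' for the stride computation below,
   * and self.mode holds the matching unicode string; any other mode raised
   * ValueError above. */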
/* "View.MemoryView":166
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*
* self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<<
* itemsize, self.ndim, order)
*
*/
__pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
/* "View.MemoryView":169
* itemsize, self.ndim, order)
*
* self.free_data = allocate_buffer # <<<<<<<<<<<<<<
* self.dtype_is_object = format == b'O'
* if allocate_buffer:
*/
__pyx_v_self->free_data = __pyx_v_allocate_buffer;
/* "View.MemoryView":170
*
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
* if allocate_buffer:
*
*/
__pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_4;
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_4 = (__pyx_v_allocate_buffer != 0);
if (__pyx_t_4) {
/* "View.MemoryView":174
*
*
* self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<<
* if not self.data:
* raise MemoryError("unable to allocate array data.")
*/
__pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 176, __pyx_L1_error)
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
}
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
__pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_4) {
/* "View.MemoryView":179
*
* if self.dtype_is_object:
* p = <PyObject **> self.data # <<<<<<<<<<<<<<
* for i in range(self.len / itemsize):
* p[i] = Py_None
*/
__pyx_v_p = ((PyObject **)__pyx_v_self->data);
/* "View.MemoryView":180
* if self.dtype_is_object:
* p = <PyObject **> self.data
* for i in range(self.len / itemsize): # <<<<<<<<<<<<<<
* p[i] = Py_None
* Py_INCREF(Py_None)
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 180, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 180, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize);
__pyx_t_9 = __pyx_t_1;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) {
__pyx_v_i = __pyx_t_11;
/* "View.MemoryView":181
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
* p[i] = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
(__pyx_v_p[__pyx_v_i]) = Py_None;
/* "View.MemoryView":182
* for i in range(self.len / itemsize):
* p[i] = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
Py_INCREF(Py_None);
}
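      /* For object buffers every slot must hold a valid reference, so the
       * loop above set each of the len/itemsize elements to Py_None with a
       * matching Py_INCREF, keeping later DECREFs on the buffer balanced. */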
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
}
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_format);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
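/* --------------------------------------------------------------------------
 * Illustrative sketch only (#if 0 guard, hypothetical helper name): the
 * fill_contig_strides_array call in __cinit__ above fills contiguous strides
 * for the requested order and returns the total buffer length in bytes,
 * roughly as follows.
 * ------------------------------------------------------------------------ */
#if 0
static Py_ssize_t contig_strides_sketch(const Py_ssize_t *shape,
                                        Py_ssize_t *strides,
                                        Py_ssize_t itemsize,
                                        int ndim, char order)
{
    Py_ssize_t stride = itemsize;
    if (order == 'F') {   /* Fortran order: first axis varies fastest. */
        for (int i = 0; i < ndim; i++)      { strides[i] = stride; stride *= shape[i]; }
    } else {              /* C order: last axis varies fastest. */
        for (int i = ndim - 1; i >= 0; i--) { strides[i] = stride; stride *= shape[i]; }
    }
    return stride;        /* == itemsize * product(shape) */
}
#endif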
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":186
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1 # <<<<<<<<<<<<<<
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = -1;
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error)
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":188
* cdef int bufmode = -1
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
goto __pyx_L3;
}
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error)
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":190
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
*/
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
}
__pyx_L3:;
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 192, __pyx_L1_error)
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
}
/* "View.MemoryView":193
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data # <<<<<<<<<<<<<<
* info.len = self.len
* info.ndim = self.ndim
*/
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":194
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
* info.len = self.len # <<<<<<<<<<<<<<
* info.ndim = self.ndim
* info.shape = self._shape
*/
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":195
* info.buf = self.data
* info.len = self.len
* info.ndim = self.ndim # <<<<<<<<<<<<<<
* info.shape = self._shape
* info.strides = self._strides
*/
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":196
* info.len = self.len
* info.ndim = self.ndim
* info.shape = self._shape # <<<<<<<<<<<<<<
* info.strides = self._strides
* info.suboffsets = NULL
*/
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":197
* info.ndim = self.ndim
* info.shape = self._shape
* info.strides = self._strides # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = self.itemsize
*/
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":198
* info.shape = self._shape
* info.strides = self._strides
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = self.itemsize
* info.readonly = 0
*/
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":199
* info.strides = self._strides
* info.suboffsets = NULL
* info.itemsize = self.itemsize # <<<<<<<<<<<<<<
* info.readonly = 0
*
*/
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":200
* info.suboffsets = NULL
* info.itemsize = self.itemsize
* info.readonly = 0 # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
__pyx_v_info->readonly = 0;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":203
*
* if flags & PyBUF_FORMAT:
* info.format = self.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":205
* info.format = self.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.obj = self
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* "View.MemoryView":207
* info.format = NULL
*
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
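/* Exit-path note for __getbuffer__ above: on entry, info->obj is parked on
 * Py_None as a placeholder. The success path re-points it at self, so the
 * check at __pyx_L0 finds something other than Py_None and keeps that
 * reference; the error path (via __pyx_L2) instead drops whatever
 * reference is still held and NULLs info->obj, leaving the half-filled
 * Py_buffer owning nothing. */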
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* Python wrapper */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
__pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":213
* def __dealloc__(array self):
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data) # <<<<<<<<<<<<<<
* elif self.free_data:
* if self.dtype_is_object:
*/
__pyx_v_self->callback_free_data(__pyx_v_self->data);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
goto __pyx_L3;
}
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
__pyx_t_1 = (__pyx_v_self->free_data != 0);
if (__pyx_t_1) {
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":216
* elif self.free_data:
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<<
* self._strides, self.ndim, False)
* free(self.data)
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
}
/* "View.MemoryView":218
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
* free(self.data) # <<<<<<<<<<<<<<
* PyObject_Free(self._shape)
*
*/
free(__pyx_v_self->data);
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
}
__pyx_L3:;
/* "View.MemoryView":219
* self._strides, self.ndim, False)
* free(self.data)
* PyObject_Free(self._shape) # <<<<<<<<<<<<<<
*
* @property
*/
PyObject_Free(__pyx_v_self->_shape);
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
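/* Teardown order in array.__dealloc__ above: a caller-installed
 * callback_free_data takes precedence; otherwise, when the array owns its
 * buffer (free_data) and stores PyObject pointers (dtype_is_object), each
 * held reference is decref'd via refcount_objects_in_slice before
 * free(self.data), so no object reference outlives its storage.
 * PyObject_Free(self._shape) runs last in every branch; _shape and
 * _strides appear to share the single block allocated in __cinit__, which
 * would be why only _shape is freed here. */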
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":223
* @property
* def memview(self):
* return self.get_memview() # <<<<<<<<<<<<<<
*
* @cname('get_memview')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_memview", 0);
/* "View.MemoryView":227
* @cname('get_memview')
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
* return memoryview(self, flags, self.dtype_is_object)
*
*/
__pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
/* "View.MemoryView":228
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
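/* get_memview above always requests a writable, format-carrying,
 * any-contiguous view (PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT |
 * PyBUF_WRITABLE), so the memview property never hands out a read-only
 * view of the array. Usage sketch, assuming `arr` is a cython.view.array:
 *
 *     mv = arr.memview       # fresh View.MemoryView memoryview per access
 *     mv.shape, mv.strides   # reflect the array's _shape / _strides
 */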
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* Python wrapper */
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":231
*
* def __len__(self):
* return self._shape[0] # <<<<<<<<<<<<<<
*
* def __getattr__(self, attr):
*/
__pyx_r = (__pyx_v_self->_shape[0]);
goto __pyx_L0;
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getattr__", 0);
/* "View.MemoryView":234
*
* def __getattr__(self, attr):
* return getattr(self.memview, attr) # <<<<<<<<<<<<<<
*
* def __getitem__(self, item):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":237
*
* def __getitem__(self, item):
* return self.memview[item] # <<<<<<<<<<<<<<
*
* def __setitem__(self, item, value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* "View.MemoryView":240
*
* def __setitem__(self, item, value):
* self.memview[item] = value # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
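/* __getattr__, __getitem__ and __setitem__ above all route through the
 * memview property, so element access on the array is element access on a
 * fresh memoryview of it. Python-level sketch, assuming a 1-D
 * cython.view.array `arr` of doubles:
 *
 *     arr[0] = 2.5        # __setitem__ -> self.memview[0] = 2.5
 *     x = arr[0]          # __getitem__ -> self.memview[0]
 *     s = arr.shape       # __getattr__ -> getattr(self.memview, "shape")
 */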
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
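/* Both pickle hooks above raise unconditionally: array.__cinit__ takes
 * required constructor arguments and the object owns raw C storage, so no
 * default reduction exists. Consequently, for any array instance `arr`:
 *
 *     import pickle
 *     pickle.dumps(arr)   # TypeError("no default __reduce__ due to
 *                         #            non-trivial __cinit__")
 */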
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
struct __pyx_array_obj *__pyx_v_result = 0;
struct __pyx_array_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("array_cwrapper", 0);
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
__pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":249
*
* if buf == NULL:
* result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<<
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
/*else*/ {
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":252
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False) # <<<<<<<<<<<<<<
* result.data = buf
*
*/
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error)
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":253
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False)
* result.data = buf # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->data = __pyx_v_buf;
}
__pyx_L3:;
/* "View.MemoryView":255
* result.data = buf
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
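/* array_cwrapper above (exposed as __pyx_array_new) is the C-level factory
 * with two construction paths: with buf == NULL the new array allocates
 * and owns its data; with a non-NULL buf it is created with
 * allocate_buffer=False and simply adopts the caller's pointer.
 * Cython-level sketch, where `some_buf` is a hypothetical existing
 * char* buffer owned by the caller:
 *
 *     owned   = array_cwrapper((3, 4), sizeof(double), b"d", b"c", NULL)
 *     wrapped = array_cwrapper((3, 4), sizeof(double), b"d", b"c", some_buf)
 */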
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* Python wrapper */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_name = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
PyObject* values[1] = {0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
}
__pyx_v_name = values[0];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__", 0);
/* "View.MemoryView":282
* cdef object name
* def __init__(self, name):
* self.name = name # <<<<<<<<<<<<<<
* def __repr__(self):
* return self.name
*/
__Pyx_INCREF(__pyx_v_name);
__Pyx_GIVEREF(__pyx_v_name);
__Pyx_GOTREF(__pyx_v_self->name);
__Pyx_DECREF(__pyx_v_self->name);
__pyx_v_self->name = __pyx_v_name;
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* Python wrapper */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":284
* self.name = name
* def __repr__(self):
* return self.name # <<<<<<<<<<<<<<
*
* cdef generic = Enum("<strided and direct or indirect>")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->name);
__pyx_r = __pyx_v_self->name;
goto __pyx_L0;
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
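/* Enum here is not Python's enum.Enum: it is a tiny named-sentinel class
 * whose __repr__ simply echoes the name given at construction. The module
 * builds a handful of singletons with it (generic, strided, indirect, ...)
 * to tag memory-layout kinds, e.g.:
 *
 *     generic = Enum("<strided and direct or indirect>")
 *     repr(generic)    # "<strided and direct or indirect>"
 */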
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_v_state = 0;
PyObject *__pyx_v__dict = 0;
int __pyx_v_use_setstate;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":5
* cdef object _dict
* cdef bint use_setstate
* state = (self.name,) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_self->name);
__Pyx_GIVEREF(__pyx_v_self->name);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name);
__pyx_v_state = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "(tree fragment)":6
* cdef bint use_setstate
* state = (self.name,)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v__dict = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_2 = (__pyx_v__dict != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "(tree fragment)":8
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict);
__pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
__pyx_t_4 = 0;
/* "(tree fragment)":9
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.name is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":11
* use_setstate = True
* else:
* use_setstate = self.name is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_self->name != Py_None);
__pyx_v_use_setstate = __pyx_t_3;
}
__pyx_L3:;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
__pyx_t_3 = (__pyx_v_use_setstate != 0);
if (__pyx_t_3) {
/* "(tree fragment)":13
* use_setstate = self.name is not None
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None);
__pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state);
__pyx_t_4 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
}
/* "(tree fragment)":15
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
__pyx_t_5 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
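/* The reduction above follows the standard __reduce__ contract:
 *
 *     (__pyx_unpickle_Enum, (type(self), 0xb068931, None), state)
 *         -> rebuild, then apply state via __setstate_cython__
 *     (__pyx_unpickle_Enum, (type(self), 0xb068931, state))
 *         -> rebuild directly from state
 *
 * 0xb068931 == 184977713 (hence __pyx_int_184977713): a checksum of the
 * class layout that __pyx_unpickle_Enum re-verifies at unpickling time, so
 * stale pickles from a differently-laid-out build are rejected. */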
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":17
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
__pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
Py_intptr_t __pyx_v_aligned_p;
size_t __pyx_v_offset;
void *__pyx_r;
int __pyx_t_1;
/* "View.MemoryView":300
* cdef void *align_pointer(void *memory, size_t alignment) nogil:
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<<
* cdef size_t offset
*
*/
__pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory);
/* "View.MemoryView":304
*
* with cython.cdivision(True):
* offset = aligned_p % alignment # <<<<<<<<<<<<<<
*
* if offset > 0:
*/
__pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment);
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
__pyx_t_1 = ((__pyx_v_offset > 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":307
*
* if offset > 0:
* aligned_p += alignment - offset # <<<<<<<<<<<<<<
*
* return <void *> aligned_p
*/
__pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset));
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
}
/* "View.MemoryView":309
* aligned_p += alignment - offset
*
* return <void *> aligned_p # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = ((void *)__pyx_v_aligned_p);
goto __pyx_L0;
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
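/* Worked example for align_pointer above: with memory == 0x1003 and
 * alignment == 8, offset = 0x1003 % 8 = 3, so the pointer advances by
 * 8 - 3 = 5 bytes to the next boundary, 0x1008; an already-aligned pointer
 * (offset == 0) is returned unchanged. A standalone C sketch of the same
 * round-up arithmetic (hypothetical helper, not part of the generated
 * module, hence compiled out):
 */
#if 0
#include <stdint.h>                          /* uintptr_t */
static void *round_up_to_alignment(void *memory, size_t alignment)
{
    uintptr_t p = (uintptr_t) memory;
    size_t offset = p % alignment;           /* distance past the last boundary */
    if (offset > 0)
        p += alignment - offset;             /* bump to the next multiple */
    return (void *) p;
}
#endif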
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* Python wrapper */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_obj = 0;
int __pyx_v_flags;
int __pyx_v_dtype_is_object;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object);
if (value) { values[2] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_obj = values[0];
__pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
if (values[2]) {
__pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
} else {
__pyx_v_dtype_is_object = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
/* "View.MemoryView":346
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj # <<<<<<<<<<<<<<
* self.flags = flags
* if type(self) is memoryview or obj is not None:
*/
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
__Pyx_GOTREF(__pyx_v_self->obj);
__Pyx_DECREF(__pyx_v_self->obj);
__pyx_v_self->obj = __pyx_v_obj;
/* "View.MemoryView":347
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj
* self.flags = flags # <<<<<<<<<<<<<<
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
*/
__pyx_v_self->flags = __pyx_v_flags;
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
__pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
__pyx_t_3 = (__pyx_t_2 != 0);
if (!__pyx_t_3) {
} else {
__pyx_t_1 = __pyx_t_3;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_3 = (__pyx_v_obj != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":349
* self.flags = flags
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<<
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
*/
__pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error)
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":351
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
/* "View.MemoryView":352
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* global __pyx_memoryview_thread_locks_used
*/
Py_INCREF(Py_None);
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
}
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
}
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
__pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":356
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
*/
__pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
/* "View.MemoryView":357
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<<
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":359
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<<
* if self.lock is NULL:
* raise MemoryError
*/
__pyx_v_self->lock = PyThread_allocate_lock();
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":361
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
* raise MemoryError # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error)
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
}
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":364
*
* if flags & PyBUF_FORMAT:
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<<
* else:
* self.dtype_is_object = dtype_is_object
*/
__pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_self->dtype_is_object = __pyx_t_1;
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
goto __pyx_L10;
}
/* "View.MemoryView":366
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
* self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<<
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
*/
/*else*/ {
__pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
}
__pyx_L10:;
/* "View.MemoryView":368
* self.dtype_is_object = dtype_is_object
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<<
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL
*/
__pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
/* "View.MemoryView":370
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL # <<<<<<<<<<<<<<
*
* def __dealloc__(memoryview self):
*/
__pyx_v_self->typeinfo = NULL;
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
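/* ------------------------------------------------------------------------
 * Editor's sketch (not part of the generated module). The tail of
 * __cinit__ above hands out thread locks from a small global pool before
 * falling back to fresh allocation; THREAD_LOCKS_PREALLOCATED has been
 * constant-folded to the literal 8 in the test at View.MemoryView:355.
 * A minimal standalone model of that pattern follows, with malloc
 * standing in for PyThread_allocate_lock; every demo_* identifier is
 * illustrative, not part of Cython's runtime.
 * --------------------------------------------------------------------- */
#include <stdlib.h>
#define DEMO_POOL_SIZE 8                    /* mirrors THREAD_LOCKS_PREALLOCATED */
static void *demo_locks[DEMO_POOL_SIZE];    /* filled once at startup, like
                                               __pyx_memoryview_thread_locks */
static int demo_locks_used = 0;             /* mirrors __pyx_memoryview_thread_locks_used */
static void *demo_acquire_lock(void)
{
    if (demo_locks_used < DEMO_POOL_SIZE)
        return demo_locks[demo_locks_used++];   /* cheap path: no allocation */
    return malloc(1);   /* stand-in for PyThread_allocate_lock(); the caller
                           must treat NULL as the MemoryError path above */
}
/* The pool exists so that creating a memoryview usually costs no lock
 * allocation at all; only the ninth and later concurrently live views pay
 * for a real PyThread_allocate_lock call. */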
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* Python wrapper */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyThread_type_lock __pyx_t_6;
PyThread_type_lock __pyx_t_7;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
__pyx_t_1 = (__pyx_v_self->obj != Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":374
* def __dealloc__(memoryview self):
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<<
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
*/
__Pyx_ReleaseBuffer((&__pyx_v_self->view));
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
goto __pyx_L3;
}
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
__pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":377
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
* (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<<
* Py_DECREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = NULL;
/* "View.MemoryView":378
*
* (<__pyx_buffer *> &self.view).obj = NULL
* Py_DECREF(Py_None) # <<<<<<<<<<<<<<
*
* cdef int i
*/
Py_DECREF(Py_None);
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
}
__pyx_L3:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
__pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":383
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<<
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
*/
__pyx_t_3 = __pyx_memoryview_thread_locks_used;
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
__pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":385
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<<
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
__pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":388
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<<
* break
* else:
*/
__pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
__pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
/* "View.MemoryView":387
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break
*/
(__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6;
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7;
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
}
/* "View.MemoryView":389
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break # <<<<<<<<<<<<<<
* else:
* PyThread_free_lock(self.lock)
*/
goto __pyx_L6_break;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
}
}
/*else*/ {
/* "View.MemoryView":391
* break
* else:
* PyThread_free_lock(self.lock) # <<<<<<<<<<<<<<
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
*/
PyThread_free_lock(__pyx_v_self->lock);
}
__pyx_L6_break:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
}
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
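/* ------------------------------------------------------------------------
 * Editor's sketch (illustrative, demo_* names are not Cython's). The
 * __dealloc__ loop above returns a pooled lock by swapping it with the
 * last live pool entry and shrinking the count, so the live prefix of the
 * pool stays dense; locks that never came from the pool are freed. The
 * same logic over a caller-supplied pool:
 * --------------------------------------------------------------------- */
#include <stdlib.h>
static void demo_release_lock(void **locks, int *used, void *lk)
{
    int i;
    for (i = 0; i < *used; i++) {
        if (locks[i] == lk) {
            (*used)--;
            if (i != *used) {               /* swap-with-last keeps locks[0..*used-1] dense */
                void *tmp = locks[*used];
                locks[*used] = locks[i];
                locks[i] = tmp;
            }
            return;                         /* pooled lock: recycled, never freed */
        }
    }
    free(lk);                               /* stand-in for PyThread_free_lock */
}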
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
Py_ssize_t __pyx_v_dim;
char *__pyx_v_itemp;
PyObject *__pyx_v_idx = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t __pyx_t_3;
PyObject *(*__pyx_t_4)(PyObject *);
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
char *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_item_pointer", 0);
/* "View.MemoryView":395
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<<
*
* for dim, idx in enumerate(index):
*/
__pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
__pyx_t_1 = 0;
if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
__pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
__pyx_t_4 = NULL;
} else {
__pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_4)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
} else {
if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
}
} else {
__pyx_t_5 = __pyx_t_4(__pyx_t_2);
if (unlikely(!__pyx_t_5)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 397, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_5);
}
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_1;
__pyx_t_1 = (__pyx_t_1 + 1);
/* "View.MemoryView":398
*
* for dim, idx in enumerate(index):
* itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<<
*
* return itemp
*/
__pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error)
__pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_7;
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":400
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
* return itemp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_itemp;
goto __pyx_L0;
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
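/* ------------------------------------------------------------------------
 * Editor's sketch. get_item_pointer above folds one integer index per
 * dimension into the buffer pointer by calling __pyx_pybuffer_index; the
 * core arithmetic is plain strided addressing. The sketch below assumes
 * already-validated, non-negative indices and a direct buffer; the real
 * helper additionally wraps negative indices, raises IndexError on
 * out-of-bounds access, and follows PIL-style suboffsets. demo_* names
 * are illustrative.
 * --------------------------------------------------------------------- */
#include <stddef.h>
static char *demo_item_pointer(char *buf, const ptrdiff_t *strides,
                               const ptrdiff_t *idx, int ndim)
{
    char *itemp = buf;
    int dim;
    for (dim = 0; dim < ndim; dim++)
        itemp += idx[dim] * strides[dim];   /* one hop per dimension */
    return itemp;
}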
/* "View.MemoryView":403
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_indices = NULL;
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":404
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":405
* def __getitem__(memoryview self, object index):
* if index is Ellipsis:
* return self # <<<<<<<<<<<<<<
*
* have_slices, indices = _unellipsify(index, self.view.ndim)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__pyx_r = ((PyObject *)__pyx_v_self);
goto __pyx_L0;
/* "View.MemoryView":404
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
}
/* "View.MemoryView":407
* return self
*
* have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* cdef char *itemp
*/
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (likely(__pyx_t_3 != Py_None)) {
PyObject* sequence = __pyx_t_3;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 407, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_v_indices = __pyx_t_5;
__pyx_t_5 = 0;
/* "View.MemoryView":410
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error)
if (__pyx_t_2) {
/* "View.MemoryView":411
* cdef char *itemp
* if have_slices:
* return memview_slice(self, indices) # <<<<<<<<<<<<<<
* else:
* itemp = self.get_item_pointer(indices)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":410
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
}
/* "View.MemoryView":413
* return memview_slice(self, indices)
* else:
* itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<<
* return self.convert_item_to_object(itemp)
*
*/
/*else*/ {
__pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":414
* else:
* itemp = self.get_item_pointer(indices)
* return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<<
*
* def __setitem__(memoryview self, object index, object value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":403
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_indices);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
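/* Editor's note on the dispatch above: __getitem__ has three outcomes.
 * Indexing with a bare Ellipsis returns self unchanged; otherwise
 * _unellipsify normalises the index into a full tuple, and any slice
 * component yields a new sliced view via memview_slice, while an
 * all-integer tuple resolves to one element through get_item_pointer
 * followed by convert_item_to_object. */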
/* "View.MemoryView":416
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* Python wrapper */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_obj = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
__Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":417
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
__pyx_t_1 = (__pyx_v_self->view.readonly != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":418
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 418, __pyx_L1_error)
/* "View.MemoryView":417
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
}
/* "View.MemoryView":420
* raise TypeError("Cannot assign to read-only memoryview")
*
* have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* if have_slices:
*/
__pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (likely(__pyx_t_2 != Py_None)) {
PyObject* sequence = __pyx_t_2;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 420, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_3;
__pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":422
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":423
*
* if have_slices:
* obj = self.is_slice(value) # <<<<<<<<<<<<<<
* if obj:
* self.setitem_slice_assignment(self[index], obj)
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_v_obj = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":424
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":425
* obj = self.is_slice(value)
* if obj:
* self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
* else:
* self.setitem_slice_assign_scalar(self[index], value)
*/
__pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "View.MemoryView":424
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":427
* self.setitem_slice_assignment(self[index], obj)
* else:
* self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
* else:
* self.setitem_indexed(index, value)
*/
/*else*/ {
__pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error)
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L5:;
/* "View.MemoryView":422
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
goto __pyx_L4;
}
/* "View.MemoryView":429
* self.setitem_slice_assign_scalar(self[index], value)
* else:
* self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
*
* cdef is_slice(self, obj):
*/
/*else*/ {
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L4:;
/* "View.MemoryView":416
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
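/* Editor's note: __setitem__ mirrors that dispatch on the write side.
 * A readonly buffer raises TypeError up front; with slices present, the
 * value is either copied from another buffer (is_slice coerces it, then
 * setitem_slice_assignment) or broadcast as a scalar
 * (setitem_slice_assign_scalar); a purely integer index goes through
 * setitem_indexed, i.e. get_item_pointer plus assign_item_from_object. */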
/* "View.MemoryView":431
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_slice", 0);
__Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":432
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "View.MemoryView":434
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":435
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object) # <<<<<<<<<<<<<<
* except TypeError:
* return None
*/
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":434
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
__pyx_t_7 = 0;
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L9_try_end;
__pyx_L4_error:;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
/* "View.MemoryView":436
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
* except TypeError: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":437
* self.dtype_is_object)
* except TypeError:
* return None # <<<<<<<<<<<<<<
*
* return obj
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L7_except_return;
}
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L7_except_return:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L0;
__pyx_L9_try_end:;
}
/* "View.MemoryView":432
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
}
/* "View.MemoryView":439
* return None
*
* return obj # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assignment(self, dst, src):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_obj);
__pyx_r = __pyx_v_obj;
goto __pyx_L0;
/* "View.MemoryView":431
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
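/* ------------------------------------------------------------------------
 * Editor's sketch. The coercion above requests the buffer with
 * self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS. Assuming only the
 * standard Py_buffer request constants from CPython's headers, the intent
 * of that expression is:
 * --------------------------------------------------------------------- */
#include <Python.h>
static int demo_source_flags(int view_flags)
{
    /* A copy source only has to be readable, so drop the writability
     * requirement; accept either C- or Fortran-contiguous layout. */
    return (view_flags & ~PyBUF_WRITABLE) | PyBUF_ANY_CONTIGUOUS;
}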
/* "View.MemoryView":441
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
__Pyx_memviewslice __pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_src_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
__Pyx_memviewslice *__pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
/* "View.MemoryView":445
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error)
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error)
/* "View.MemoryView":446
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<<
* src.ndim, dst.ndim, self.dtype_is_object)
*
*/
if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error)
__pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error)
/* "View.MemoryView":447
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":445
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
__pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error)
/* "View.MemoryView":441
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
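/* Editor's note: setitem_slice_assignment itself only unwraps the two
 * memoryview objects into raw __Pyx_memviewslice structs; the strided,
 * broadcasting element copy happens inside __pyx_memoryview_copy_contents,
 * whose -1 return (checked above) signals a raised exception such as a
 * shape mismatch. */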
/* "View.MemoryView":449
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
int __pyx_v_array[128];
void *__pyx_v_tmp;
void *__pyx_v_item;
__Pyx_memviewslice *__pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_tmp_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
char const *__pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
PyObject *__pyx_t_12 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
/* "View.MemoryView":451
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
* cdef int array[128]
* cdef void *tmp = NULL # <<<<<<<<<<<<<<
* cdef void *item
*
*/
__pyx_v_tmp = NULL;
/* "View.MemoryView":456
* cdef __Pyx_memviewslice *dst_slice
* cdef __Pyx_memviewslice tmp_slice
* dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
*
* if <size_t>self.view.itemsize > sizeof(array):
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error)
__pyx_v_dst_slice = __pyx_t_1;
/* "View.MemoryView":458
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
__pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":459
*
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<<
* if tmp == NULL:
* raise MemoryError
*/
__pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
/* "View.MemoryView":460
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
__pyx_t_2 = ((__pyx_v_tmp == NULL) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":461
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
* raise MemoryError # <<<<<<<<<<<<<<
* item = tmp
* else:
*/
PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error)
/* "View.MemoryView":460
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
}
/* "View.MemoryView":462
* if tmp == NULL:
* raise MemoryError
* item = tmp # <<<<<<<<<<<<<<
* else:
* item = <void *> array
*/
__pyx_v_item = __pyx_v_tmp;
/* "View.MemoryView":458
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
goto __pyx_L3;
}
/* "View.MemoryView":464
* item = tmp
* else:
* item = <void *> array # <<<<<<<<<<<<<<
*
* try:
*/
/*else*/ {
__pyx_v_item = ((void *)__pyx_v_array);
}
__pyx_L3:;
/* "View.MemoryView":466
* item = <void *> array
*
* try: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value
*/
/*try:*/ {
/* "View.MemoryView":467
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
__pyx_t_2 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_2) {
/* "View.MemoryView":468
* try:
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<<
* else:
* self.assign_item_from_object(<char *> item, value)
*/
(((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
/* "View.MemoryView":467
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":470
* (<PyObject **> item)[0] = <PyObject *> value
* else:
* self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L8:;
/* "View.MemoryView":474
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
__pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":475
*
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
* item, self.dtype_is_object)
*/
__pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":474
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
}
/* "View.MemoryView":476
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
* item, self.dtype_is_object)
* finally:
*/
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
}
/* "View.MemoryView":479
* item, self.dtype_is_object)
* finally:
* PyMem_Free(tmp) # <<<<<<<<<<<<<<
*
* cdef setitem_indexed(self, index, value):
*/
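/* Editor's note: the block below is Cython's expansion of `finally:`.
 * Any live exception is fetched and parked in temporaries (__pyx_t_7 ..
 * __pyx_t_12) together with the source location, PyMem_Free(tmp) then
 * runs unconditionally, and the saved exception state is restored before
 * jumping to the error label. */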
/*finally:*/ {
/*normal exit:*/{
PyMem_Free(__pyx_v_tmp);
goto __pyx_L7;
}
__pyx_L6_error:;
/*exception exit:*/{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_7);
__Pyx_XGOTREF(__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_10);
__Pyx_XGOTREF(__pyx_t_11);
__Pyx_XGOTREF(__pyx_t_12);
__pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename;
{
PyMem_Free(__pyx_v_tmp);
}
if (PY_MAJOR_VERSION >= 3) {
__Pyx_XGIVEREF(__pyx_t_10);
__Pyx_XGIVEREF(__pyx_t_11);
__Pyx_XGIVEREF(__pyx_t_12);
__Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12);
}
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_XGIVEREF(__pyx_t_9);
__Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9);
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6;
goto __pyx_L1_error;
}
__pyx_L7:;
}
/* "View.MemoryView":449
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
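/* ------------------------------------------------------------------------
 * Editor's sketch. setitem_slice_assign_scalar above packs the scalar
 * once into scratch storage, using the 512-byte on-stack int[128] when
 * the item fits and PyMem_Malloc otherwise, then fans it out across the
 * destination. The standalone model below takes the value pre-packed and
 * assumes a contiguous 1-D destination; the real slice_assign_scalar
 * walks the slice's strides instead. demo_* names are illustrative.
 * --------------------------------------------------------------------- */
#include <stdlib.h>
#include <string.h>
static int demo_assign_scalar(size_t itemsize, const void *packed_item,
                              char *dst, size_t count)
{
    int stack_buf[128];                     /* mirrors `cdef int array[128]` */
    void *tmp = NULL;
    void *item;
    size_t i;
    if (itemsize > sizeof(stack_buf)) {
        tmp = malloc(itemsize);             /* stand-in for PyMem_Malloc */
        if (tmp == NULL)
            return -1;                      /* the MemoryError path above */
        item = tmp;
    } else {
        item = stack_buf;                   /* small items stay on the stack */
    }
    memcpy(item, packed_item, itemsize);    /* pack the scalar exactly once */
    for (i = 0; i < count; i++)             /* broadcast it over every element */
        memcpy(dst + i * itemsize, item, itemsize);
    free(tmp);                              /* finally: PyMem_Free(tmp); free(NULL) is a no-op */
    return 0;
}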
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
char *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_indexed", 0);
/* "View.MemoryView":482
*
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
* self.assign_item_from_object(itemp, value)
*
*/
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_1;
/* "View.MemoryView":483
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
size_t __pyx_t_10;
int __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":488
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef bytes bytesitem
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":491
* cdef bytes bytesitem
*
* bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
* try:
* result = struct.unpack(self.view.format, bytesitem)
*/
__pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":493
* bytesitem = itemp[:self.view.itemsize]
* try:
* result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
* except struct.error:
* raise ValueError("Unable to convert item to object")
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6);
__Pyx_INCREF(__pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
}
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
/*else:*/ {
__pyx_t_10 = strlen(__pyx_v_self->view.format);
__pyx_t_11 = ((__pyx_t_10 == 1) != 0);
if (__pyx_t_11) {
/* "View.MemoryView":498
* else:
* if len(self.view.format) == 1:
* return result[0] # <<<<<<<<<<<<<<
* return result
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6_except_return;
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
}
/* "View.MemoryView":499
* if len(self.view.format) == 1:
* return result[0]
* return result # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
__pyx_L3_error:;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "View.MemoryView":494
* try:
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error: # <<<<<<<<<<<<<<
* raise ValueError("Unable to convert item to object")
* else:
*/
__Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9);
__pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0;
if (__pyx_t_8) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_1);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(1, 495, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
}
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
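/* Editor's note: convert_item_to_object is the slow fallback used only
 * when the memoryview was constructed by hand or Cython cannot unbox the
 * element type itself. It copies view.itemsize bytes into a Python bytes
 * object, calls struct.unpack(view.format, bytesitem), translates
 * struct.error into ValueError, and unwraps the result tuple when the
 * format string is a single code, the common scalar case. */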
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
char *__pyx_t_11;
char *__pyx_t_12;
char *__pyx_t_13;
char *__pyx_t_14;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":504
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef char c
* cdef bytes bytesvalue
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":510
*
* if isinstance(value, tuple):
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
* else:
* bytesvalue = struct.pack(self.view.format, value)
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":512
* bytesvalue = struct.pack(self.view.format, *value)
* else:
* bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
*
* for i, c in enumerate(bytesvalue):
*/
/*else*/ {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
__pyx_t_7 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_7 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_5) {
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
}
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
__Pyx_INCREF(__pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
}
__pyx_L3:;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
__PYX_ERR(1, 514, __pyx_L1_error)
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_10 = __pyx_v_bytesvalue;
__pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10);
__pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10));
for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
__pyx_t_11 = __pyx_t_14;
__pyx_v_c = (__pyx_t_11[0]);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
__pyx_v_i = __pyx_t_9;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = (__pyx_t_9 + 1);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
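/* Editorial note: the function above compiles
 * memoryview.assign_item_from_object (View.MemoryView:501-515). The value is
 * serialized with struct.pack (splatted when it is a tuple), and the
 * resulting bytes are copied into the item slot one byte at a time, which is
 * what the generated pointer loop above implements. A hedged Python sketch;
 * `fmt` stands in for self.view.format:
 *
 *     import struct
 *
 *     def assign_item_from_object(itemp, value, fmt):
 *         if isinstance(value, tuple):
 *             bytesvalue = struct.pack(fmt, *value)
 *         else:
 *             bytesvalue = struct.pack(fmt, value)
 *         for i, c in enumerate(bytesvalue):
 *             itemp[i] = c                    # byte-wise copy into the buffer
 */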
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
char *__pyx_t_5;
void *__pyx_t_6;
int __pyx_t_7;
Py_ssize_t __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
__pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_2 = (__pyx_v_self->view.readonly != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 520, __pyx_L1_error)
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
}
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":523
*
* if flags & PyBUF_ND:
* info.shape = self.view.shape # <<<<<<<<<<<<<<
* else:
* info.shape = NULL
*/
__pyx_t_4 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_4;
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":525
* info.shape = self.view.shape
* else:
* info.shape = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_STRIDES:
*/
/*else*/ {
__pyx_v_info->shape = NULL;
}
__pyx_L6:;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":528
*
* if flags & PyBUF_STRIDES:
* info.strides = self.view.strides # <<<<<<<<<<<<<<
* else:
* info.strides = NULL
*/
__pyx_t_4 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_4;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
goto __pyx_L7;
}
/* "View.MemoryView":530
* info.strides = self.view.strides
* else:
* info.strides = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_INDIRECT:
*/
/*else*/ {
__pyx_v_info->strides = NULL;
}
__pyx_L7:;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":533
*
* if flags & PyBUF_INDIRECT:
* info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
* else:
* info.suboffsets = NULL
*/
__pyx_t_4 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_4;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":535
* info.suboffsets = self.view.suboffsets
* else:
* info.suboffsets = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
/*else*/ {
__pyx_v_info->suboffsets = NULL;
}
__pyx_L8:;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":538
*
* if flags & PyBUF_FORMAT:
* info.format = self.view.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_5 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_5;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":540
* info.format = self.view.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.buf = self.view.buf
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L9:;
/* "View.MemoryView":542
* info.format = NULL
*
* info.buf = self.view.buf # <<<<<<<<<<<<<<
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
*/
__pyx_t_6 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_6;
/* "View.MemoryView":543
*
* info.buf = self.view.buf
* info.ndim = self.view.ndim # <<<<<<<<<<<<<<
* info.itemsize = self.view.itemsize
* info.len = self.view.len
*/
__pyx_t_7 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_7;
/* "View.MemoryView":544
* info.buf = self.view.buf
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
* info.len = self.view.len
* info.readonly = self.view.readonly
*/
__pyx_t_8 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_8;
/* "View.MemoryView":545
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
* info.len = self.view.len # <<<<<<<<<<<<<<
* info.readonly = self.view.readonly
* info.obj = self
*/
__pyx_t_8 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_8;
/* "View.MemoryView":546
* info.itemsize = self.view.itemsize
* info.len = self.view.len
* info.readonly = self.view.readonly # <<<<<<<<<<<<<<
* info.obj = self
*
*/
__pyx_t_1 = __pyx_v_self->view.readonly;
__pyx_v_info->readonly = __pyx_t_1;
/* "View.MemoryView":547
* info.len = self.view.len
* info.readonly = self.view.readonly
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
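/* Editorial note: the function above is the PEP 3118 exporter for
 * memoryview (View.MemoryView:518-547). Writable requests on a read-only
 * view are rejected, and shape/strides/suboffsets/format are exposed only
 * when the caller set the matching PyBUF_* flag. A runnable sketch of the
 * flag handling, modeling Py_buffer and the view as plain dicts purely for
 * illustration (the flag values are CPython's):
 *
 *     PyBUF_WRITABLE, PyBUF_FORMAT, PyBUF_ND = 0x0001, 0x0004, 0x0008
 *     PyBUF_STRIDES  = 0x0010 | PyBUF_ND
 *     PyBUF_INDIRECT = 0x0100 | PyBUF_STRIDES
 *
 *     def fill_buffer_info(view, flags):
 *         if flags & PyBUF_WRITABLE and view["readonly"]:
 *             raise ValueError("Cannot create writable memory view "
 *                              "from read-only memoryview")
 *         return {
 *             "shape":      view["shape"]      if flags & PyBUF_ND       else None,
 *             "strides":    view["strides"]    if flags & PyBUF_STRIDES  else None,
 *             "suboffsets": view["suboffsets"] if flags & PyBUF_INDIRECT else None,
 *             "format":     view["format"]     if flags & PyBUF_FORMAT   else None,
 *         }
 */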
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":554
* @property
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
* transpose_memslice(&result.from_slice)
* return result
*/
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error)
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":555
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error)
/* "View.MemoryView":556
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
* return result # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
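/* Editorial note: the T property above (View.MemoryView:553-556) copies the
 * memoryview object and transposes the copy's slice in place;
 * transpose_memslice signals failure by returning 0, which the generated
 * code turns into an exception. For a plain strided view, transposition
 * amounts to reversing shape and strides over the same buffer; a hedged
 * sketch, with names introduced here for illustration:
 *
 *     def transpose_memslice(shape, strides):
 *         # same data pointer; axes reversed
 *         return shape[::-1], strides[::-1]
 */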
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":560
* @property
* def base(self):
* return self.obj # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj);
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_length;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":564
* @property
* def shape(self):
* return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_length = (__pyx_t_2[0]);
__pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
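/* Editorial note: the shape property above (View.MemoryView:563-564), and
 * the strides and suboffsets properties that follow, all use the same
 * pattern: a list comprehension over the first ndim entries of the
 * underlying C array, converted to a tuple with PyList_AsTuple. In Python
 * terms, with the Py_buffer modeled as a dict for illustration:
 *
 *     def shape(view):
 *         return tuple(view["shape"][:view["ndim"]])
 */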
/* "View.MemoryView":567
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_stride;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":568
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":570
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 570, __pyx_L1_error)
/* "View.MemoryView":568
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
}
/* "View.MemoryView":572
* raise ValueError("Buffer view does not expose strides")
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_v_stride = (__pyx_t_3[0]);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "View.MemoryView":567
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":575
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
Py_ssize_t *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":576
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
__pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":577
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_tuple__13, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":576
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
}
/* "View.MemoryView":579
* return (-1,) * self.view.ndim
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
__pyx_t_4 = __pyx_t_6;
__pyx_v_suboffset = (__pyx_t_4[0]);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":575
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
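/* Editorial note: unlike shape, the suboffsets property above
 * (View.MemoryView:575-579) must handle exporters that supply no suboffsets
 * array at all; it then fabricates (-1,) * ndim, -1 being the buffer
 * protocol's "no indirection" marker. A sketch under the same dict-based
 * modeling as above:
 *
 *     def suboffsets(view):
 *         if view["suboffsets"] is None:          # exporter gave none
 *             return (-1,) * view["ndim"]
 *         return tuple(view["suboffsets"][:view["ndim"]])
 */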
/* "View.MemoryView":582
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":583
* @property
* def ndim(self):
* return self.view.ndim # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":582
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":586
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":587
* @property
* def itemsize(self):
* return self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":586
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":590
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":591
* @property
* def nbytes(self):
* return self.size * self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":590
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":594
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":595
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
__pyx_t_1 = (__pyx_v_self->_size == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":596
* def size(self):
* if self._size is None:
* result = 1 # <<<<<<<<<<<<<<
*
* for length in self.view.shape[:self.view.ndim]:
*/
__Pyx_INCREF(__pyx_int_1);
__pyx_v_result = __pyx_int_1;
/* "View.MemoryView":598
* result = 1
*
* for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<<
* result *= length
*
*/
__pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
__pyx_t_6 = 0;
/* "View.MemoryView":599
*
* for length in self.view.shape[:self.view.ndim]:
* result *= length # <<<<<<<<<<<<<<
*
* self._size = result
*/
__pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
__pyx_t_6 = 0;
}
/* "View.MemoryView":601
* result *= length
*
* self._size = result # <<<<<<<<<<<<<<
*
* return self._size
*/
__Pyx_INCREF(__pyx_v_result);
__Pyx_GIVEREF(__pyx_v_result);
__Pyx_GOTREF(__pyx_v_self->_size);
__Pyx_DECREF(__pyx_v_self->_size);
__pyx_v_self->_size = __pyx_v_result;
/* "View.MemoryView":595
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
}
/* "View.MemoryView":603
* self._size = result
*
* return self._size # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->_size);
__pyx_r = __pyx_v_self->_size;
goto __pyx_L0;
/* "View.MemoryView":594
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
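/* Editorial note: the size property above (View.MemoryView:594-603)
 * computes the element count as the product of the shape and memoizes it in
 * self._size, so repeated reads (including the nbytes property, which is
 * size * itemsize) do the multiplication only once. A sketch of the
 * caching, with the class and attribute names chosen for illustration:
 *
 *     import math
 *
 *     class View:
 *         def __init__(self, shape):
 *             self.shape = shape
 *             self._size = None
 *
 *         @property
 *         def size(self):
 *             if self._size is None:              # computed lazily, once
 *                 self._size = math.prod(self.shape)
 *             return self._size
 */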
/* "View.MemoryView":605
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* Python wrapper */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":606
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
__pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":607
* def __len__(self):
* if self.view.ndim >= 1:
* return self.view.shape[0] # <<<<<<<<<<<<<<
*
* return 0
*/
__pyx_r = (__pyx_v_self->view.shape[0]);
goto __pyx_L0;
/* "View.MemoryView":606
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
}
/* "View.MemoryView":609
* return self.view.shape[0]
*
* return 0 # <<<<<<<<<<<<<<
*
* def __repr__(self):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":605
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
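/* Editorial note: __len__ above (View.MemoryView:605-609) returns the
 * extent of the first dimension, or 0 for a zero-dimensional view. In
 * Python terms, under the same dict-based modeling used earlier:
 *
 *     def mv_len(view):
 *         return view["shape"][0] if view["ndim"] >= 1 else 0
 */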
/* "View.MemoryView":611
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":612
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":613
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self)) # <<<<<<<<<<<<<<
*
* def __str__(self):
*/
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* "View.MemoryView":612
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":611
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":615
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__str__", 0);
/* "View.MemoryView":616
*
* def __str__(self):
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":615
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":619
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_c_contig", 0);
/* "View.MemoryView":622
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":623
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<<
*
* def is_f_contig(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":619
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
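/* Editorial note: is_c_contig above and is_f_contig below differ only in
 * the order character ('C' vs 'F') passed to slice_is_contig, which checks
 * that the strides describe a densely packed buffer in that order. A
 * runnable sketch of such a contiguity test (a strict packed check, not
 * taken from this file; names introduced for illustration):
 *
 *     def is_contig(shape, strides, itemsize, order='C'):
 *         dims = reversed(range(len(shape))) if order == 'C' else range(len(shape))
 *         expected = itemsize
 *         for d in dims:
 *             if strides[d] != expected:
 *                 return False
 *             expected *= shape[d]
 *         return True
 */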
/* "View.MemoryView":625
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_f_contig", 0);
/* "View.MemoryView":628
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":629
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<<
*
* def copy(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":625
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":631
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_mslice;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy", 0);
/* "View.MemoryView":633
* def copy(self):
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &mslice)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
/* "View.MemoryView":635
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*
* slice_copy(self, &mslice) # <<<<<<<<<<<<<<
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
/* "View.MemoryView":636
*
* slice_copy(self, &mslice)
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_C_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":641
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
*
* def copy_fortran(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":631
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
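/* Editorial note: copy() above (View.MemoryView:631-641) clears
 * PyBUF_F_CONTIGUOUS from the view's flags, requests a fresh C-ordered
 * buffer via slice_copy_contig, and wraps the result in a new memoryview;
 * copy_fortran below is the mirror image ('fortran' order, C flag cleared).
 * A sketch of just the flag arithmetic, using CPython's flag values:
 *
 *     PyBUF_STRIDES      = 0x0010 | 0x0008
 *     PyBUF_C_CONTIGUOUS = 0x0020 | PyBUF_STRIDES
 *     PyBUF_F_CONTIGUOUS = 0x0040 | PyBUF_STRIDES
 *
 *     def copy_flags(flags, order='C'):
 *         # drop the opposite-order bit, then request the desired order
 *         if order == 'C':
 *             return (flags & ~PyBUF_F_CONTIGUOUS) | PyBUF_C_CONTIGUOUS
 *         return (flags & ~PyBUF_C_CONTIGUOUS) | PyBUF_F_CONTIGUOUS
 */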
/* "View.MemoryView":643
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy_fortran", 0);
/* "View.MemoryView":645
* def copy_fortran(self):
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &src)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
/* "View.MemoryView":647
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*
* slice_copy(self, &src) # <<<<<<<<<<<<<<
* dst = slice_copy_contig(&src, "fortran", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
/* "View.MemoryView":648
*
* slice_copy(self, &src)
* dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_F_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error)
__pyx_v_dst = __pyx_t_1;
/* "View.MemoryView":653
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":643
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
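/* Usage sketch (hedged; Cython-level context assumed rather than taken from
 * this file): for a typed memoryview m inside compiled Cython code,
 *
 *     # m.copy()          -> new C-contiguous buffer with the same data
 *     # m.copy_fortran()  -> new Fortran-contiguous buffer
 *
 * Both return a fresh memoryview over newly allocated memory; the original
 * view is left untouched.
 */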
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
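/* Note (editorial): __reduce_cython__ and __setstate_cython__ above both
   raise TypeError unconditionally. memoryview has a non-trivial __cinit__
   (it acquires a live Py_buffer), so Cython emits these stubs to block
   pickling outright rather than let pickle produce an unusable object. */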
/* "View.MemoryView":657
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
struct __pyx_memoryview_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
/* "View.MemoryView":658
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
* result.typeinfo = typeinfo
* return result
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_o);
__Pyx_GIVEREF(__pyx_v_o);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":659
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_v_result->typeinfo = __pyx_v_typeinfo;
/* "View.MemoryView":660
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_check')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":657
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
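/* Sketch (illustrative; __pyx_sketch_call_memoryview is a hypothetical
   helper, not generated code): the unrolled tuple packing above is
   equivalent to calling the type as memoryview(o, flags, bool(dtype_is_object)).
   A condensed rendering of the same call: */
static CYTHON_UNUSED PyObject *__pyx_sketch_call_memoryview(PyTypeObject *tp,
        PyObject *o, int flags, int dtype_is_object) {
    PyObject *result;
    PyObject *args = Py_BuildValue("(OiO)", o, flags,
                                   dtype_is_object ? Py_True : Py_False);
    if (!args) return NULL;
    result = PyObject_Call((PyObject *)tp, args, NULL); /* tp(o, flags, b) */
    Py_DECREF(args);
    return result;
}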
/* "View.MemoryView":663
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("memoryview_check", 0);
/* "View.MemoryView":664
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o):
* return isinstance(o, memoryview) # <<<<<<<<<<<<<<
*
* cdef tuple _unellipsify(object index, int ndim):
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type);
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "View.MemoryView":663
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
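/* Note (editorial): isinstance(o, memoryview) compiles to __Pyx_TypeCheck,
   a subtype-aware check, so instances of the _memoryviewslice subclass
   also satisfy memoryview_check. */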
/* "View.MemoryView":666
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
PyObject *__pyx_v_tup = NULL;
PyObject *__pyx_v_result = NULL;
int __pyx_v_have_slices;
int __pyx_v_seen_ellipsis;
CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_item = NULL;
Py_ssize_t __pyx_v_nslices;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
PyObject *__pyx_t_11 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_unellipsify", 0);
/* "View.MemoryView":671
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
__pyx_t_1 = PyTuple_Check(__pyx_v_index);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":672
* """
* if not isinstance(index, tuple):
* tup = (index,) # <<<<<<<<<<<<<<
* else:
* tup = index
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_index);
__Pyx_GIVEREF(__pyx_v_index);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
__pyx_v_tup = __pyx_t_3;
__pyx_t_3 = 0;
/* "View.MemoryView":671
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":674
* tup = (index,)
* else:
* tup = index # <<<<<<<<<<<<<<
*
* result = []
*/
/*else*/ {
__Pyx_INCREF(__pyx_v_index);
__pyx_v_tup = __pyx_v_index;
}
__pyx_L3:;
/* "View.MemoryView":676
* tup = index
*
* result = [] # <<<<<<<<<<<<<<
* have_slices = False
* seen_ellipsis = False
*/
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_result = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":677
*
* result = []
* have_slices = False # <<<<<<<<<<<<<<
* seen_ellipsis = False
* for idx, item in enumerate(tup):
*/
__pyx_v_have_slices = 0;
/* "View.MemoryView":678
* result = []
* have_slices = False
* seen_ellipsis = False # <<<<<<<<<<<<<<
* for idx, item in enumerate(tup):
* if item is Ellipsis:
*/
__pyx_v_seen_ellipsis = 0;
/* "View.MemoryView":679
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
__Pyx_INCREF(__pyx_int_0);
__pyx_t_3 = __pyx_int_0;
if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) {
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_6)) {
if (likely(PyList_CheckExact(__pyx_t_4))) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
} else {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
}
} else {
__pyx_t_7 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_7)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 679, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_7);
}
__Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
__pyx_t_7 = 0;
__Pyx_INCREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
__pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3);
__pyx_t_3 = __pyx_t_7;
__pyx_t_7 = 0;
/* "View.MemoryView":680
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
__pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":681
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
__pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":682
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error)
__pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__16);
__Pyx_GIVEREF(__pyx_slice__16);
PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__16);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":683
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True # <<<<<<<<<<<<<<
* else:
* result.append(slice(None))
*/
__pyx_v_seen_ellipsis = 1;
/* "View.MemoryView":681
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
goto __pyx_L7;
}
/* "View.MemoryView":685
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__16); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":686
* else:
* result.append(slice(None))
* have_slices = True # <<<<<<<<<<<<<<
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
*/
__pyx_v_have_slices = 1;
/* "View.MemoryView":680
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
goto __pyx_L6;
}
/* "View.MemoryView":688
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
/*else*/ {
__pyx_t_2 = PySlice_Check(__pyx_v_item);
__pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0);
__pyx_t_1 = __pyx_t_10;
__pyx_L9_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":689
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
* raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
*
* have_slices = have_slices or isinstance(item, slice)
*/
__pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_Raise(__pyx_t_11, 0, 0, 0);
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__PYX_ERR(1, 689, __pyx_L1_error)
/* "View.MemoryView":688
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
}
/* "View.MemoryView":691
* raise TypeError("Cannot index with type '%s'" % type(item))
*
* have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
* result.append(item)
*
*/
__pyx_t_10 = (__pyx_v_have_slices != 0);
if (!__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = (__pyx_t_10 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_have_slices = __pyx_t_1;
/* "View.MemoryView":692
*
* have_slices = have_slices or isinstance(item, slice)
* result.append(item) # <<<<<<<<<<<<<<
*
* nslices = ndim - len(result)
*/
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error)
}
__pyx_L6:;
/* "View.MemoryView":679
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":694
* result.append(item)
*
* nslices = ndim - len(result) # <<<<<<<<<<<<<<
* if nslices:
* result.extend([slice(None)] * nslices)
*/
__pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error)
__pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
/* "View.MemoryView":695
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
__pyx_t_1 = (__pyx_v_nslices != 0);
if (__pyx_t_1) {
/* "View.MemoryView":696
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__16);
__Pyx_GIVEREF(__pyx_slice__16);
PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__16);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":695
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
}
/* "View.MemoryView":698
* result.extend([slice(None)] * nslices)
*
* return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
*/
__Pyx_XDECREF(__pyx_r);
if (!__pyx_v_have_slices) {
} else {
__pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_L14_bool_binop_done:;
__pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_11);
__pyx_t_11 = 0;
goto __pyx_L0;
/* "View.MemoryView":666
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
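/* Worked example (editorial): for ndim == 4 and index == (Ellipsis, 5),
   tup has length 2, so the Ellipsis expands to ndim - len(tup) + 1 == 3
   full slices and the trailing 5 is appended, giving
   result == [slice(None), slice(None), slice(None), 5]; nslices is then
   4 - 4 == 0 and the function returns (True, tuple(result)). Any further
   Ellipsis in the same index contributes a single slice(None), since
   seen_ellipsis is already set. */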
/* "View.MemoryView":700
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
/* "View.MemoryView":701
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported")
*/
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
__pyx_t_1 = __pyx_t_3;
__pyx_v_suboffset = (__pyx_t_1[0]);
/* "View.MemoryView":702
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
__pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":703
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(1, 703, __pyx_L1_error)
/* "View.MemoryView":702
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
}
}
/* "View.MemoryView":700
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
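/* Sketch (illustrative only): in the buffer protocol a suboffset >= 0
   marks an indirect, pointer-chasing (PIL-style) dimension, which the
   memoryview slicing code cannot handle; assert_direct_dimensions above
   raises on the first such dimension. The same predicate without the
   exception, as a standalone helper: */
static CYTHON_UNUSED int __pyx_sketch_all_dims_direct(const Py_ssize_t *suboffsets,
        int ndim) {
    int i;
    for (i = 0; i < ndim; i++) {
        if (suboffsets[i] >= 0) return 0; /* indirect dimension found */
    }
    return 1; /* every dimension is direct */
}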
/* "View.MemoryView":710
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":711
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices):
* cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
* cdef bint negative_step
* cdef __Pyx_memviewslice src, dst
*/
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":718
*
*
* memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
*
* cdef _memoryviewslice memviewsliceobj
*/
(void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))));
/* "View.MemoryView":722
* cdef _memoryviewslice memviewsliceobj
*
* assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(1, 722, __pyx_L1_error)
}
}
#endif
/* "View.MemoryView":724
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":725
*
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview # <<<<<<<<<<<<<<
* p_src = &memviewsliceobj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":726
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, &src)
*/
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
/* "View.MemoryView":724
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
goto __pyx_L3;
}
/* "View.MemoryView":728
* p_src = &memviewsliceobj.from_slice
* else:
* slice_copy(memview, &src) # <<<<<<<<<<<<<<
* p_src = &src
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":729
* else:
* slice_copy(memview, &src)
* p_src = &src # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":735
*
*
* dst.memview = p_src.memview # <<<<<<<<<<<<<<
* dst.data = p_src.data
*
*/
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":736
*
* dst.memview = p_src.memview
* dst.data = p_src.data # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":741
*
*
* cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
* cdef int *p_suboffset_dim = &suboffset_dim
* cdef Py_ssize_t start, stop, step
*/
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":742
*
* cdef __Pyx_memviewslice *p_dst = &dst
* cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
* cdef Py_ssize_t start, stop, step
* cdef bint have_start, have_stop, have_step
*/
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":746
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
__pyx_t_6 = 0;
if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_8)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
} else {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
}
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 746, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":747
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
__pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":751
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
* index, 0, 0, # start, stop, step # <<<<<<<<<<<<<<
* 0, 0, 0, # have_{start,stop,step}
* False)
*/
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error)
/* "View.MemoryView":748
* for dim, index in enumerate(indices):
* if PyIndex_Check(index):
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error)
/* "View.MemoryView":747
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
goto __pyx_L6;
}
/* "View.MemoryView":754
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":755
* False)
* elif index is None:
* p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
*/
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":756
* elif index is None:
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1
*/
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":757
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
* new_ndim += 1
* else:
*/
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;
/* "View.MemoryView":758
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1 # <<<<<<<<<<<<<<
* else:
* start = index.start or 0
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
/* "View.MemoryView":754
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
goto __pyx_L6;
}
/* "View.MemoryView":760
* new_ndim += 1
* else:
* start = index.start or 0 # <<<<<<<<<<<<<<
* stop = index.stop or 0
* step = index.step or 0
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L7_bool_binop_done:;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":761
* else:
* start = index.start or 0
* stop = index.stop or 0 # <<<<<<<<<<<<<<
* step = index.step or 0
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L9_bool_binop_done:;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":762
* start = index.start or 0
* stop = index.stop or 0
* step = index.step or 0 # <<<<<<<<<<<<<<
*
* have_start = index.start is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L11_bool_binop_done:;
__pyx_v_step = __pyx_t_10;
/* "View.MemoryView":764
* step = index.step or 0
*
* have_start = index.start is not None # <<<<<<<<<<<<<<
* have_stop = index.stop is not None
* have_step = index.step is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":765
*
* have_start = index.start is not None
* have_stop = index.stop is not None # <<<<<<<<<<<<<<
* have_step = index.step is not None
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":766
* have_start = index.start is not None
* have_stop = index.stop is not None
* have_step = index.step is not None # <<<<<<<<<<<<<<
*
* slice_memviewslice(
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":768
* have_step = index.step is not None
*
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error)
/* "View.MemoryView":774
* have_start, have_stop, have_step,
* True)
* new_ndim += 1 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
/* "View.MemoryView":746
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":776
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":777
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":778
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
* memviewsliceobj.to_dtype_func,
* memview.dtype_is_object)
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) }
/* "View.MemoryView":779
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
* else:
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) }
/* "View.MemoryView":777
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":776
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
}
/* "View.MemoryView":782
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
/*else*/ {
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":783
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":782
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":710
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
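/* Note (editorial): memview_slice above dispatches on each index object:
   an integer (PyIndex_Check) collapses a dimension via slice_memviewslice
   with is_slice == 0; None injects a new length-1 dimension (shape 1,
   stride 0, suboffset -1); a slice object is decomposed into
   start/stop/step plus have_* flags and passed with is_slice == 1. The
   resulting __Pyx_memviewslice is rewrapped with memoryview_fromslice,
   preserving to_object_func/to_dtype_func when the source was already a
   _memoryviewslice. */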
/* "View.MemoryView":807
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
Py_ssize_t __pyx_v_new_shape;
int __pyx_v_negative_step;
int __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":827
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
__pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":829
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
__pyx_t_1 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":830
*
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":829
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
}
/* "View.MemoryView":831
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
__pyx_t_1 = (0 <= __pyx_v_start);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
}
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":832
* start += shape
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<<
* else:
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error)
/* "View.MemoryView":831
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
}
/* "View.MemoryView":827
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
goto __pyx_L3;
}
/* "View.MemoryView":835
* else:
*
* negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<<
*
* if have_step and step == 0:
*/
/*else*/ {
__pyx_t_1 = ((__pyx_v_have_step != 0) != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step < 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L6_bool_binop_done:;
__pyx_v_negative_step = __pyx_t_2;
/* "View.MemoryView":837
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
__pyx_t_1 = (__pyx_v_have_step != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step == 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L9_bool_binop_done:;
if (__pyx_t_2) {
/* "View.MemoryView":838
*
* if have_step and step == 0:
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error)
/* "View.MemoryView":837
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
}
/* "View.MemoryView":841
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
__pyx_t_2 = (__pyx_v_have_start != 0);
if (__pyx_t_2) {
/* "View.MemoryView":842
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":843
* if have_start:
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if start < 0:
* start = 0
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":844
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":845
* start += shape
* if start < 0:
* start = 0 # <<<<<<<<<<<<<<
* elif start >= shape:
* if negative_step:
*/
__pyx_v_start = 0;
/* "View.MemoryView":844
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
}
/* "View.MemoryView":842
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
goto __pyx_L12;
}
/* "View.MemoryView":846
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
__pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":847
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":848
* elif start >= shape:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = shape
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":847
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L14;
}
/* "View.MemoryView":850
* start = shape - 1
* else:
* start = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
/*else*/ {
__pyx_v_start = __pyx_v_shape;
}
__pyx_L14:;
/* "View.MemoryView":846
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
}
__pyx_L12:;
/* "View.MemoryView":841
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
goto __pyx_L11;
}
/* "View.MemoryView":852
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":853
* else:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = 0
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":852
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L15;
}
/* "View.MemoryView":855
* start = shape - 1
* else:
* start = 0 # <<<<<<<<<<<<<<
*
* if have_stop:
*/
/*else*/ {
__pyx_v_start = 0;
}
__pyx_L15:;
}
__pyx_L11:;
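/* At this point start is clamped into the valid range for the slice
   direction: [0, shape] for a positive step, [0, shape - 1] for a
   negative one, matching Python slice semantics. */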
/* "View.MemoryView":857
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
__pyx_t_2 = (__pyx_v_have_stop != 0);
if (__pyx_t_2) {
/* "View.MemoryView":858
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":859
* if have_stop:
* if stop < 0:
* stop += shape # <<<<<<<<<<<<<<
* if stop < 0:
* stop = 0
*/
__pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
/* "View.MemoryView":860
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":861
* stop += shape
* if stop < 0:
* stop = 0 # <<<<<<<<<<<<<<
* elif stop > shape:
* stop = shape
*/
__pyx_v_stop = 0;
/* "View.MemoryView":860
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
}
/* "View.MemoryView":858
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
goto __pyx_L17;
}
/* "View.MemoryView":862
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
__pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":863
* stop = 0
* elif stop > shape:
* stop = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
__pyx_v_stop = __pyx_v_shape;
/* "View.MemoryView":862
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
}
__pyx_L17:;
/* "View.MemoryView":857
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
goto __pyx_L16;
}
/* "View.MemoryView":865
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":866
* else:
* if negative_step:
* stop = -1 # <<<<<<<<<<<<<<
* else:
* stop = shape
*/
__pyx_v_stop = -1L;
/* "View.MemoryView":865
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
goto __pyx_L19;
}
/* "View.MemoryView":868
* stop = -1
* else:
* stop = shape # <<<<<<<<<<<<<<
*
* if not have_step:
*/
/*else*/ {
__pyx_v_stop = __pyx_v_shape;
}
__pyx_L19:;
}
__pyx_L16:;
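/* stop is now clamped to [0, shape] when given explicitly; with no
   explicit stop it defaults to -1 (an exclusive lower bound) for a
   negative step, or to shape for a positive one. */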
/* "View.MemoryView":870
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
__pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":871
*
* if not have_step:
* step = 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_step = 1;
/* "View.MemoryView":870
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
}
/* "View.MemoryView":875
*
* with cython.cdivision(True):
* new_shape = (stop - start) // step # <<<<<<<<<<<<<<
*
* if (stop - start) - step * new_shape:
*/
__pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
/* "View.MemoryView":877
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
__pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":878
*
* if (stop - start) - step * new_shape:
* new_shape += 1 # <<<<<<<<<<<<<<
*
* if new_shape < 0:
*/
__pyx_v_new_shape = (__pyx_v_new_shape + 1);
/* "View.MemoryView":877
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
}
/* "View.MemoryView":880
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
__pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":881
*
* if new_shape < 0:
* new_shape = 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_new_shape = 0;
/* "View.MemoryView":880
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
}
/* "View.MemoryView":884
*
*
* dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<<
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset
*/
(__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
/* "View.MemoryView":885
*
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<<
* dst.suboffsets[new_ndim] = suboffset
*
*/
(__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
/* "View.MemoryView":886
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
}
__pyx_L3:;
/* "View.MemoryView":889
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
__pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":890
*
* if suboffset_dim[0] < 0:
* dst.data += start * stride # <<<<<<<<<<<<<<
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride
*/
__pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
/* "View.MemoryView":889
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
goto __pyx_L23;
}
/* "View.MemoryView":892
* dst.data += start * stride
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<<
*
* if suboffset >= 0:
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_suboffset_dim[0]);
(__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
}
__pyx_L23:;
/* "View.MemoryView":894
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":895
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
__pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":896
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
__pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":897
* if not is_slice:
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<<
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
*/
__pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":896
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
goto __pyx_L26;
}
/* "View.MemoryView":899
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<<
* "must be indexed and not sliced", dim)
* else:
*/
/*else*/ {
/* "View.MemoryView":900
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
* "must be indexed and not sliced", dim) # <<<<<<<<<<<<<<
* else:
* suboffset_dim[0] = new_ndim
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error)
}
__pyx_L26:;
/* "View.MemoryView":895
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
goto __pyx_L25;
}
/* "View.MemoryView":902
* "must be indexed and not sliced", dim)
* else:
* suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<<
*
* return 0
*/
/*else*/ {
(__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
}
__pyx_L25:;
/* "View.MemoryView":894
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
}
/* "View.MemoryView":904
* suboffset_dim[0] = new_ndim
*
* return 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":807
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
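/* Sketch (illustrative; mirrors the cdivision block above): with start,
   stop and step normalized, the new extent is (stop - start) // step
   rounded up on inexact division and clamped at zero. For shape 10 and
   slice 1:8:3, (8 - 1) / 3 truncates to 2 with remainder 1, so the extent
   becomes 3 (elements 1, 4, 7). */
static CYTHON_UNUSED Py_ssize_t __pyx_sketch_slice_extent(Py_ssize_t start,
        Py_ssize_t stop, Py_ssize_t step) {
    Py_ssize_t n = (stop - start) / step;  /* truncating C division */
    if ((stop - start) - (step * n)) n++;  /* bump on inexact division */
    return (n < 0) ? 0 : n;                /* empty slice clamps to zero */
}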
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_suboffset;
Py_ssize_t __pyx_v_itemsize;
char *__pyx_v_resultp;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("pybuffer_index", 0);
/* "View.MemoryView":912
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
* cdef Py_ssize_t itemsize = view.itemsize
* cdef char *resultp
*/
__pyx_v_suboffset = -1L;
/* "View.MemoryView":913
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
* cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
* cdef char *resultp
*
*/
__pyx_t_1 = __pyx_v_view->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
__pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":917
*
* if view.ndim == 0:
* shape = view.len / itemsize # <<<<<<<<<<<<<<
* stride = itemsize
* else:
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 917, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 917, __pyx_L1_error)
}
__pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize);
/* "View.MemoryView":918
* if view.ndim == 0:
* shape = view.len / itemsize
* stride = itemsize # <<<<<<<<<<<<<<
* else:
* shape = view.shape[dim]
*/
__pyx_v_stride = __pyx_v_itemsize;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
goto __pyx_L3;
}
/* "View.MemoryView":920
* stride = itemsize
* else:
* shape = view.shape[dim] # <<<<<<<<<<<<<<
* stride = view.strides[dim]
* if view.suboffsets != NULL:
*/
/*else*/ {
__pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
/* "View.MemoryView":921
* else:
* shape = view.shape[dim]
* stride = view.strides[dim] # <<<<<<<<<<<<<<
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim]
*/
__pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
__pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":923
* stride = view.strides[dim]
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<<
*
* if index < 0:
*/
__pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
}
}
__pyx_L3:;
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":926
*
* if index < 0:
* index += view.shape[dim] # <<<<<<<<<<<<<<
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*/
__pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":928
* index += view.shape[dim]
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* if index >= shape:
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 928, __pyx_L1_error)
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
}
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":931
*
* if index >= shape:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* resultp = bufp + index * stride
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 931, __pyx_L1_error)
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":933
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* resultp = bufp + index * stride # <<<<<<<<<<<<<<
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset
*/
__pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":935
* resultp = bufp + index * stride
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
*
* return resultp
*/
__pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
}
/* "View.MemoryView":937
* resultp = (<char **> resultp)[0] + suboffset
*
* return resultp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_resultp;
goto __pyx_L0;
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
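/* NOTE: `pybuffer_index` resolves the address of element `index` along
 * dimension `dim` of a Py_buffer. It normalizes negative indices,
 * bounds-checks against the dimension's shape (raising IndexError), and
 * follows a suboffset for indirect buffers. The core address arithmetic,
 * once the checks above have passed, is:
 *
 *     char *p = bufp + index * stride;      // strided element address
 *     if (suboffset >= 0)                   // indirect (PIL-style) buffer
 *         p = *(char **)p + suboffset;      // dereference, then offset
 *
 * The 0-dimensional case treats the buffer as a flat run of
 * view.len / itemsize items with stride == itemsize.
 */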
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
int __pyx_v_ndim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_r;
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
long __pyx_t_3;
long __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":944
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
* cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<<
*
* cdef Py_ssize_t *shape = memslice.shape
*/
__pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
__pyx_v_ndim = __pyx_t_1;
/* "View.MemoryView":946
* cdef int ndim = memslice.memview.view.ndim
*
* cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<<
* cdef Py_ssize_t *strides = memslice.strides
*
*/
__pyx_t_2 = __pyx_v_memslice->shape;
__pyx_v_shape = __pyx_t_2;
/* "View.MemoryView":947
*
* cdef Py_ssize_t *shape = memslice.shape
* cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_v_memslice->strides;
__pyx_v_strides = __pyx_t_2;
/* "View.MemoryView":951
*
* cdef int i, j
* for i in range(ndim / 2): # <<<<<<<<<<<<<<
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
*/
__pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2);
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":952
* cdef int i, j
* for i in range(ndim / 2):
* j = ndim - 1 - i # <<<<<<<<<<<<<<
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i]
*/
__pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
/* "View.MemoryView":953
* for i in range(ndim / 2):
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<<
* shape[i], shape[j] = shape[j], shape[i]
*
*/
__pyx_t_5 = (__pyx_v_strides[__pyx_v_j]);
__pyx_t_6 = (__pyx_v_strides[__pyx_v_i]);
(__pyx_v_strides[__pyx_v_i]) = __pyx_t_5;
(__pyx_v_strides[__pyx_v_j]) = __pyx_t_6;
/* "View.MemoryView":954
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<<
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
*/
__pyx_t_6 = (__pyx_v_shape[__pyx_v_j]);
__pyx_t_5 = (__pyx_v_shape[__pyx_v_i]);
(__pyx_v_shape[__pyx_v_i]) = __pyx_t_6;
(__pyx_v_shape[__pyx_v_j]) = __pyx_t_5;
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
if (!__pyx_t_8) {
} else {
__pyx_t_7 = __pyx_t_8;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
__pyx_t_7 = __pyx_t_8;
__pyx_L6_bool_binop_done:;
if (__pyx_t_7) {
/* "View.MemoryView":957
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<<
*
* return 1
*/
__pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error)
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
}
}
/* "View.MemoryView":959
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
* return 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 1;
goto __pyx_L0;
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
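/* NOTE: `transpose_memslice` reverses the axis order of a slice in place
 * by swapping shape and stride entries pairwise from the two ends. A
 * sketch of the loop, with SWAP standing in for the explicit temporaries
 * used above:
 *
 *     for (i = 0; i < ndim / 2; i++) {
 *         j = ndim - 1 - i;
 *         SWAP(strides[i], strides[j]);
 *         SWAP(shape[i], shape[j]);
 *     }
 *
 * Indirect dimensions (suboffset >= 0) cannot be transposed this way, so
 * the function raises ValueError for them. It is declared `except 0` on
 * the Cython side, which is why success returns 1 and the error path
 * returns 0.
 */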
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* Python wrapper */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":977
*
* def __dealloc__(self):
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":981
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL:
* return self.to_object_func(itemp) # <<<<<<<<<<<<<<
* else:
* return memoryview.convert_item_to_object(self, itemp)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
}
/* "View.MemoryView":983
* return self.to_object_func(itemp)
* else:
* return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
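/* NOTE: `convert_item_to_object` turns a raw item pointer into a Python
 * object. Typed slices carry a dtype-specific `to_object_func`
 * (installed by `memoryview_fromslice` further down); when it is NULL
 * the call falls back to the base class's generic
 * `memoryview.convert_item_to_object`.
 */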
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":987
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<<
* else:
* memoryview.assign_item_from_object(self, itemp, value)
*/
__pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error)
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":989
* self.to_dtype_func(itemp, value)
* else:
* memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<<
*
* @property
*/
/*else*/ {
__pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L3:;
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
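/* NOTE: `assign_item_from_object` is the setter counterpart of
 * `convert_item_to_object` above: it writes a Python value into the
 * buffer through `to_dtype_func` when one is set, else defers to the base
 * `memoryview.assign_item_from_object`. The Cython signature shown at the
 * top of this block declares `to_dtype_func` as `except 0`, so a return
 * value of 0 signals an already-raised exception, hence the
 * `unlikely(__pyx_t_2 == ((int)0))` check.
 */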
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":993
* @property
* def base(self):
* return self.from_object # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->from_object);
__pyx_r = __pyx_v_self->from_object;
goto __pyx_L0;
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_TypeInfo *__pyx_t_4;
Py_buffer __pyx_t_5;
Py_ssize_t *__pyx_t_6;
Py_ssize_t *__pyx_t_7;
Py_ssize_t *__pyx_t_8;
Py_ssize_t __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_fromslice", 0);
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1008
*
* if <PyObject *> memviewslice.memview == Py_None:
* return None # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
}
/* "View.MemoryView":1013
*
*
* result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<<
*
* result.from_slice = memviewslice
*/
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1015
* result = _memoryviewslice(None, 0, dtype_is_object)
*
* result.from_slice = memviewslice # <<<<<<<<<<<<<<
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
*/
__pyx_v_result->from_slice = __pyx_v_memviewslice;
/* "View.MemoryView":1016
*
* result.from_slice = memviewslice
* __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<<
*
* result.from_object = (<memoryview> memviewslice.memview).base
*/
__PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
/* "View.MemoryView":1018
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
* result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<<
* result.typeinfo = memviewslice.memview.typeinfo
*
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_result->from_object);
__Pyx_DECREF(__pyx_v_result->from_object);
__pyx_v_result->from_object = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":1019
*
* result.from_object = (<memoryview> memviewslice.memview).base
* result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<<
*
* result.view = memviewslice.memview.view
*/
__pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
__pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
/* "View.MemoryView":1021
* result.typeinfo = memviewslice.memview.typeinfo
*
* result.view = memviewslice.memview.view # <<<<<<<<<<<<<<
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
*/
__pyx_t_5 = __pyx_v_memviewslice.memview->view;
__pyx_v_result->__pyx_base.view = __pyx_t_5;
/* "View.MemoryView":1022
*
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<<
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
*/
__pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
/* "View.MemoryView":1023
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
/* "View.MemoryView":1024
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
/* "View.MemoryView":1025
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
*/
Py_INCREF(Py_None);
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
__pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1028
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
* result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<<
* else:
* result.flags = PyBUF_RECORDS_RO
*/
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
goto __pyx_L4;
}
/* "View.MemoryView":1030
* result.flags = PyBUF_RECORDS
* else:
* result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<<
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
*/
/*else*/ {
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO;
}
__pyx_L4:;
/* "View.MemoryView":1032
* result.flags = PyBUF_RECORDS_RO
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<<
* result.view.strides = <Py_ssize_t *> result.from_slice.strides
*
*/
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
/* "View.MemoryView":1033
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
* result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
/* "View.MemoryView":1036
*
*
* result.view.suboffsets = NULL # <<<<<<<<<<<<<<
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
*/
__pyx_v_result->__pyx_base.view.suboffsets = NULL;
/* "View.MemoryView":1037
*
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
*/
__pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_v_suboffset = (__pyx_t_6[0]);
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
__pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1039
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
/* "View.MemoryView":1040
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break # <<<<<<<<<<<<<<
*
* result.view.len = result.view.itemsize
*/
goto __pyx_L6_break;
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
}
}
__pyx_L6_break:;
/* "View.MemoryView":1042
* break
*
* result.view.len = result.view.itemsize # <<<<<<<<<<<<<<
* for length in result.view.shape[:ndim]:
* result.view.len *= length
*/
__pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
/* "View.MemoryView":1043
*
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<<
* result.view.len *= length
*
*/
__pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1044
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]:
* result.view.len *= length # <<<<<<<<<<<<<<
*
* result.to_object_func = to_object_func
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
}
/* "View.MemoryView":1046
* result.view.len *= length
*
* result.to_object_func = to_object_func # <<<<<<<<<<<<<<
* result.to_dtype_func = to_dtype_func
*
*/
__pyx_v_result->to_object_func = __pyx_v_to_object_func;
/* "View.MemoryView":1047
*
* result.to_object_func = to_object_func
* result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
/* "View.MemoryView":1049
* result.to_dtype_func = to_dtype_func
*
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
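/* NOTE: `memoryview_fromslice` wraps a C-level `__Pyx_memviewslice` in a
 * new Python `_memoryviewslice` object (returning None if the slice's
 * memview is None). It copies the source memview's Py_buffer, then
 * repoints view.buf, view.shape and view.strides at the slice's own data
 * and arrays; view.suboffsets is set only if some dimension is indirect.
 * Writability of the source selects PyBUF_RECORDS versus
 * PyBUF_RECORDS_RO, and view.len is recomputed as itemsize times the
 * product of the shape. The generated code routes that product through
 * Python integers; the arithmetic it performs is simply:
 *
 *     view.len = view.itemsize;
 *     for (d = 0; d < ndim; d++)
 *         view.len *= view.shape[d];
 */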
/* "View.MemoryView":1052
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
__Pyx_memviewslice *__pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_slice_from_memview", 0);
/* "View.MemoryView":1055
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1056
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice):
* obj = memview # <<<<<<<<<<<<<<
* return &obj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":1057
* if isinstance(memview, _memoryviewslice):
* obj = memview
* return &obj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, mslice)
*/
__pyx_r = (&__pyx_v_obj->from_slice);
goto __pyx_L0;
/* "View.MemoryView":1055
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
}
/* "View.MemoryView":1059
* return &obj.from_slice
* else:
* slice_copy(memview, mslice) # <<<<<<<<<<<<<<
* return mslice
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
/* "View.MemoryView":1060
* else:
* slice_copy(memview, mslice)
* return mslice # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_slice_copy')
*/
__pyx_r = __pyx_v_mslice;
goto __pyx_L0;
}
/* "View.MemoryView":1052
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_obj);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
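/* NOTE: `get_slice_from_memview` gives callers a `__Pyx_memviewslice *`
 * for any memoryview object. If the object already is a
 * `_memoryviewslice`, a pointer to its embedded `from_slice` is returned
 * directly; otherwise the caller-provided `mslice` is filled in via
 * `slice_copy` and returned. Declared `except NULL`, so NULL means an
 * exception was raised.
 */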
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
int __pyx_v_dim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
Py_ssize_t *__pyx_v_suboffsets;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
__Pyx_RefNannySetupContext("slice_copy", 0);
/* "View.MemoryView":1067
* cdef (Py_ssize_t*) shape, strides, suboffsets
*
* shape = memview.view.shape # <<<<<<<<<<<<<<
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets
*/
__pyx_t_1 = __pyx_v_memview->view.shape;
__pyx_v_shape = __pyx_t_1;
/* "View.MemoryView":1068
*
* shape = memview.view.shape
* strides = memview.view.strides # <<<<<<<<<<<<<<
* suboffsets = memview.view.suboffsets
*
*/
__pyx_t_1 = __pyx_v_memview->view.strides;
__pyx_v_strides = __pyx_t_1;
/* "View.MemoryView":1069
* shape = memview.view.shape
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<<
*
* dst.memview = <__pyx_memoryview *> memview
*/
__pyx_t_1 = __pyx_v_memview->view.suboffsets;
__pyx_v_suboffsets = __pyx_t_1;
/* "View.MemoryView":1071
* suboffsets = memview.view.suboffsets
*
* dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<<
* dst.data = <char *> memview.view.buf
*
*/
__pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
/* "View.MemoryView":1072
*
* dst.memview = <__pyx_memoryview *> memview
* dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<<
*
* for dim in range(memview.view.ndim):
*/
__pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);
/* "View.MemoryView":1074
* dst.data = <char *> memview.view.buf
*
* for dim in range(memview.view.ndim): # <<<<<<<<<<<<<<
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
*/
__pyx_t_2 = __pyx_v_memview->view.ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_dim = __pyx_t_4;
/* "View.MemoryView":1075
*
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<<
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*/
(__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]);
/* "View.MemoryView":1076
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<<
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*
*/
(__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]);
/* "View.MemoryView":1077
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object')
*/
if ((__pyx_v_suboffsets != 0)) {
__pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]);
} else {
__pyx_t_5 = -1L;
}
(__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5;
}
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
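/* NOTE: `slice_copy` snapshots a memoryview's Py_buffer metadata into a
 * flat `__Pyx_memviewslice`: per dimension it copies shape and strides,
 * and fills suboffsets with -1 (meaning "direct") whenever the buffer
 * exposes no suboffsets array:
 *
 *     dst->suboffsets[dim] = suboffsets ? suboffsets[dim] : -1;
 */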
/* "View.MemoryView":1080
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
__Pyx_memviewslice __pyx_v_memviewslice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy", 0);
/* "View.MemoryView":1083
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<<
* return memoryview_copy_from_slice(memview, &memviewslice)
*
*/
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
/* "View.MemoryView":1084
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice)
* return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object_from_slice')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":1080
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
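/* NOTE: `memoryview_copy` is a convenience wrapper: it snapshots the view
 * into a stack-allocated slice with `slice_copy`, then hands both to
 * `memoryview_copy_from_slice` below to build the new Python object.
 */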
/* "View.MemoryView":1087
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
PyObject *(*__pyx_v_to_object_func)(char *);
int (*__pyx_v_to_dtype_func)(char *, PyObject *);
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *(*__pyx_t_3)(char *);
int (*__pyx_t_4)(char *, PyObject *);
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
/* "View.MemoryView":1094
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1095
*
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<<
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
*/
__pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
__pyx_v_to_object_func = __pyx_t_3;
/* "View.MemoryView":1096
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<<
* else:
* to_object_func = NULL
*/
__pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
__pyx_v_to_dtype_func = __pyx_t_4;
/* "View.MemoryView":1094
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
goto __pyx_L3;
}
/* "View.MemoryView":1098
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
* to_object_func = NULL # <<<<<<<<<<<<<<
* to_dtype_func = NULL
*
*/
/*else*/ {
__pyx_v_to_object_func = NULL;
/* "View.MemoryView":1099
* else:
* to_object_func = NULL
* to_dtype_func = NULL # <<<<<<<<<<<<<<
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
*/
__pyx_v_to_dtype_func = NULL;
}
__pyx_L3:;
/* "View.MemoryView":1101
* to_dtype_func = NULL
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<<
* to_object_func, to_dtype_func,
* memview.dtype_is_object)
*/
__Pyx_XDECREF(__pyx_r);
/* "View.MemoryView":1103
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
* to_object_func, to_dtype_func,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":1087
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
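/* NOTE: `memoryview_copy_from_slice` rebuilds a Python-level memoryview
 * from a C slice. When the source is a typed `_memoryviewslice` it
 * carries the `to_object_func` / `to_dtype_func` pointers over so the
 * copy keeps the same item-conversion behaviour; plain memoryviews get
 * NULL for both and fall back to the generic paths shown earlier.
 */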
/* "View.MemoryView":1109
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
Py_ssize_t __pyx_r;
int __pyx_t_1;
/* "View.MemoryView":1110
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
__pyx_t_1 = ((__pyx_v_arg < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1111
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0:
* return -arg # <<<<<<<<<<<<<<
* else:
* return arg
*/
__pyx_r = (-__pyx_v_arg);
goto __pyx_L0;
/* "View.MemoryView":1110
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
}
/* "View.MemoryView":1113
* return -arg
* else:
* return arg # <<<<<<<<<<<<<<
*
* @cname('__pyx_get_best_slice_order')
*/
/*else*/ {
__pyx_r = __pyx_v_arg;
goto __pyx_L0;
}
/* "View.MemoryView":1109
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1116
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
int __pyx_v_i;
Py_ssize_t __pyx_v_c_stride;
Py_ssize_t __pyx_v_f_stride;
char __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1121
* """
* cdef int i
* cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<<
* cdef Py_ssize_t f_stride = 0
*
*/
__pyx_v_c_stride = 0;
/* "View.MemoryView":1122
* cdef int i
* cdef Py_ssize_t c_stride = 0
* cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_f_stride = 0;
/* "View.MemoryView":1124
* cdef Py_ssize_t f_stride = 0
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1125
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1126
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1127
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
goto __pyx_L4_break;
/* "View.MemoryView":1125
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L4_break:;
/* "View.MemoryView":1129
* break
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
*/
__pyx_t_1 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_1;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1130
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1131
* for i in range(ndim):
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1132
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
*/
goto __pyx_L7_break;
/* "View.MemoryView":1130
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L7_break:;
/* "View.MemoryView":1134
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
__pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1135
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
* return 'C' # <<<<<<<<<<<<<<
* else:
* return 'F'
*/
__pyx_r = 'C';
goto __pyx_L0;
/* "View.MemoryView":1134
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
}
/* "View.MemoryView":1137
* return 'C'
* else:
* return 'F' # <<<<<<<<<<<<<<
*
* @cython.cdivision(True)
*/
/*else*/ {
__pyx_r = 'F';
goto __pyx_L0;
}
/* "View.MemoryView":1116
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
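/* NOTE: `get_best_order` picks a copy order by comparing the stride of
 * the last dimension whose extent exceeds 1 (c_stride) against that of
 * the first such dimension (f_stride), returning 'C' when
 * |c_stride| <= |f_stride| and 'F' otherwise. For example, a
 * C-contiguous 3x4 array of doubles has strides (32, 8): |8| <= |32|,
 * so 'C' is chosen.
 */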
/* "View.MemoryView":1140
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
Py_ssize_t __pyx_v_dst_extent;
Py_ssize_t __pyx_v_src_stride;
Py_ssize_t __pyx_v_dst_stride;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
/* "View.MemoryView":1147
*
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
*/
__pyx_v_src_extent = (__pyx_v_src_shape[0]);
/* "View.MemoryView":1148
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0]
*/
__pyx_v_dst_extent = (__pyx_v_dst_shape[0]);
/* "View.MemoryView":1149
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
*/
__pyx_v_src_stride = (__pyx_v_src_strides[0]);
/* "View.MemoryView":1150
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_dst_stride = (__pyx_v_dst_strides[0]);
/* "View.MemoryView":1152
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
__pyx_t_2 = ((__pyx_v_src_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
/* "View.MemoryView":1154
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
*/
__pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
if (__pyx_t_2) {
__pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
}
__pyx_t_3 = (__pyx_t_2 != 0);
__pyx_t_1 = __pyx_t_3;
__pyx_L5_bool_binop_done:;
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
if (__pyx_t_1) {
/* "View.MemoryView":1155
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)));
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
goto __pyx_L4;
}
/* "View.MemoryView":1157
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1158
* else:
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<<
* src_data += src_stride
* dst_data += dst_stride
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize));
/* "View.MemoryView":1159
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
* else:
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1160
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L4:;
/* "View.MemoryView":1152
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
goto __pyx_L3;
}
/* "View.MemoryView":1162
* dst_data += dst_stride
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* _copy_strided_to_strided(src_data, src_strides + 1,
* dst_data, dst_strides + 1,
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1163
* else:
* for i in range(dst_extent):
* _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<<
* dst_data, dst_strides + 1,
* src_shape + 1, dst_shape + 1,
*/
_copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize);
/* "View.MemoryView":1167
* src_shape + 1, dst_shape + 1,
* ndim - 1, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
*
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1168
* ndim - 1, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src,
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1140
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
/* function exit code */
}
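/* Illustrative sketch (hypothetical array and sizes; kept out of
   compilation with #if 0): how _copy_strided_to_strided recurses over the
   leading dimension and, for ndim == 1 with both innermost strides equal
   to the itemsize, collapses each run into a single memcpy. */
#if 0
static void example_copy_2d (void)
{
    double src [3][4] = {{0.0}}, dst [3][4] ;
    Py_ssize_t shape   [2] = { 3, 4 } ;
    Py_ssize_t strides [2] = { 4 * (Py_ssize_t) sizeof (double),
                                   (Py_ssize_t) sizeof (double) } ;
    /* the innermost stride equals itemsize on both sides, so each of the
       3 row copies takes the memcpy fast path above */
    _copy_strided_to_strided ((char *) src, strides, (char *) dst, strides,
                              shape, shape, 2, sizeof (double)) ;
}
#endif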
/* "View.MemoryView":1170
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
/* "View.MemoryView":1173
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
* _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<<
* src.shape, dst.shape, ndim, itemsize)
*
*/
_copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1170
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
/* function exit code */
}
/* "View.MemoryView":1177
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_r;
Py_ssize_t __pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
/* "View.MemoryView":1179
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil:
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<<
*
* for shape in src.shape[:ndim]:
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_size = __pyx_t_1;
/* "View.MemoryView":1181
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*
* for shape in src.shape[:ndim]: # <<<<<<<<<<<<<<
* size *= shape
*
*/
__pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim);
for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_shape = (__pyx_t_2[0]);
/* "View.MemoryView":1182
*
* for shape in src.shape[:ndim]:
* size *= shape # <<<<<<<<<<<<<<
*
* return size
*/
__pyx_v_size = (__pyx_v_size * __pyx_v_shape);
}
/* "View.MemoryView":1184
* size *= shape
*
* return size # <<<<<<<<<<<<<<
*
* @cname('__pyx_fill_contig_strides_array')
*/
__pyx_r = __pyx_v_size;
goto __pyx_L0;
/* "View.MemoryView":1177
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
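/* slice_get_size multiplies the itemsize by the extents of the first ndim
   dimensions: size = itemsize * shape[0] * ... * shape[ndim-1].  For
   example (hypothetical numbers), a 3 x 4 slice of 8-byte items occupies
   8 * 3 * 4 == 96 bytes.  This is the logical payload size; strides play
   no part, so it equals the spanned memory only for a contiguous slice. */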
/* "View.MemoryView":1187
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
int __pyx_v_idx;
Py_ssize_t __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1196
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
__pyx_t_1 = ((__pyx_v_order == 'F') != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1197
*
* if order == 'F':
* for idx in range(ndim): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride *= shape[idx]
*/
__pyx_t_2 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_idx = __pyx_t_4;
/* "View.MemoryView":1198
* if order == 'F':
* for idx in range(ndim):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride *= shape[idx]
* else:
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1199
* for idx in range(ndim):
* strides[idx] = stride
* stride *= shape[idx] # <<<<<<<<<<<<<<
* else:
* for idx in range(ndim - 1, -1, -1):
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
/* "View.MemoryView":1196
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
goto __pyx_L3;
}
/* "View.MemoryView":1201
* stride *= shape[idx]
* else:
* for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride *= shape[idx]
*/
/*else*/ {
for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) {
__pyx_v_idx = __pyx_t_2;
/* "View.MemoryView":1202
* else:
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride *= shape[idx]
*
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1203
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride
* stride *= shape[idx] # <<<<<<<<<<<<<<
*
* return stride
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
}
__pyx_L3:;
/* "View.MemoryView":1205
* stride *= shape[idx]
*
* return stride # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_data_to_temp')
*/
__pyx_r = __pyx_v_stride;
goto __pyx_L0;
/* "View.MemoryView":1187
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
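/* A worked example for fill_contig_strides_array (hypothetical numbers).
   For shape {3, 4} and initial stride (itemsize) 8:
     order == 'C' walks the dims last-to-first:  strides = {32, 8}
     order == 'F' walks the dims first-to-last:  strides = {8, 24}
   In both cases the returned value is 8 * 3 * 4 == 96, the total size in
   bytes of the contiguous block those strides describe. */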
/* "View.MemoryView":1208
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
int __pyx_v_i;
void *__pyx_v_result;
size_t __pyx_v_itemsize;
size_t __pyx_v_size;
void *__pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
struct __pyx_memoryview_obj *__pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":1219
* cdef void *result
*
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef size_t size = slice_get_size(src, ndim)
*
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1220
*
* cdef size_t itemsize = src.memview.view.itemsize
* cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<<
*
* result = malloc(size)
*/
__pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
/* "View.MemoryView":1222
* cdef size_t size = slice_get_size(src, ndim)
*
* result = malloc(size) # <<<<<<<<<<<<<<
* if not result:
* _err(MemoryError, NULL)
*/
__pyx_v_result = malloc(__pyx_v_size);
/* "View.MemoryView":1223
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
__pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1224
* result = malloc(size)
* if not result:
* _err(MemoryError, NULL) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error)
/* "View.MemoryView":1223
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
}
/* "View.MemoryView":1227
*
*
* tmpslice.data = <char *> result # <<<<<<<<<<<<<<
* tmpslice.memview = src.memview
* for i in range(ndim):
*/
__pyx_v_tmpslice->data = ((char *)__pyx_v_result);
/* "View.MemoryView":1228
*
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview # <<<<<<<<<<<<<<
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
*/
__pyx_t_4 = __pyx_v_src->memview;
__pyx_v_tmpslice->memview = __pyx_t_4;
/* "View.MemoryView":1229
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview
* for i in range(ndim): # <<<<<<<<<<<<<<
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1230
* tmpslice.memview = src.memview
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<<
* tmpslice.suboffsets[i] = -1
*
*/
(__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
/* "View.MemoryView":1231
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,
*/
(__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1233
* tmpslice.suboffsets[i] = -1
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<<
* ndim, order)
*
*/
(void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));
/* "View.MemoryView":1237
*
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1238
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
__pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1239
* for i in range(ndim):
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0 # <<<<<<<<<<<<<<
*
* if slice_is_contig(src[0], order, ndim):
*/
(__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1238
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
}
}
/* "View.MemoryView":1241
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1242
*
* if slice_is_contig(src[0], order, ndim):
* memcpy(result, src.data, size) # <<<<<<<<<<<<<<
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*/
(void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size));
/* "View.MemoryView":1241
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":1244
* memcpy(result, src.data, size)
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<<
*
* return result
*/
/*else*/ {
copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
}
__pyx_L9:;
/* "View.MemoryView":1246
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":1208
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = NULL;
__pyx_L0:;
return __pyx_r;
}
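/* The ownership contract of copy_data_to_temp, with a minimal caller-side
   sketch (not compiled; src, ndim and the fail label stand for the
   enclosing function's locals).  On success it returns a malloc'ed
   contiguous buffer that tmpslice->data aliases, and the caller must
   free() it; on failure it returns NULL with a Python exception already
   set (the MemoryError path above). */
#if 0
__Pyx_memviewslice tmp ;
void *tmpdata = __pyx_memoryview_copy_data_to_temp (&src, &tmp, 'C', ndim) ;
if (unlikely (tmpdata == NULL)) goto fail ;  /* exception already set */
/* ... read through tmp, a contiguous copy of src in the requested order ... */
free (tmpdata) ;
#endif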
/* "View.MemoryView":1251
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_extents", 0);
/* "View.MemoryView":1254
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
* (i, extent1, extent2)) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err_dim')
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":1253
* cdef int _err_extents(int i, Py_ssize_t extent1,
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<<
* (i, extent1, extent2))
*
*/
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 1253, __pyx_L1_error)
/* "View.MemoryView":1251
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_dim", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1258
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
* raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err')
*/
__pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_INCREF(__pyx_v_error);
__pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 1258, __pyx_L1_error)
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1261
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1262
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
__pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":1263
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL:
* raise error(msg.decode('ascii')) # <<<<<<<<<<<<<<
* else:
* raise error
*/
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_error);
__pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
__pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 1263, __pyx_L1_error)
/* "View.MemoryView":1262
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
}
/* "View.MemoryView":1265
* raise error(msg.decode('ascii'))
* else:
* raise error # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_contents')
*/
/*else*/ {
__Pyx_Raise(__pyx_v_error, 0, 0, 0);
__PYX_ERR(1, 1265, __pyx_L1_error)
}
/* "View.MemoryView":1261
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
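/* _err, _err_dim and _err_extents implement Cython's "except -1 with gil"
   convention for raising from nogil code: each helper acquires the GIL,
   raises, releases the GIL, and returns -1, so nogil callers only need to
   test the return value.  A caller-side sketch (not compiled; the
   variables and label stand for the enclosing function's locals): */
#if 0
if (src.suboffsets [i] >= 0)
{
    if (__pyx_memoryview_err_dim (__pyx_builtin_ValueError,
            ((char *) "Dimension %d is not direct"), i) == -1)
    {
        goto fail ;  /* Python exception already set by the helper */
    }
}
#endif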
/* "View.MemoryView":1268
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
void *__pyx_v_tmpdata;
size_t __pyx_v_itemsize;
int __pyx_v_i;
char __pyx_v_order;
int __pyx_v_broadcasting;
int __pyx_v_direct_copy;
__Pyx_memviewslice __pyx_v_tmp;
int __pyx_v_ndim;
int __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
void *__pyx_t_7;
int __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":1276
* Check for overlapping memory and verify the shapes.
* """
* cdef void *tmpdata = NULL # <<<<<<<<<<<<<<
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
*/
__pyx_v_tmpdata = NULL;
/* "View.MemoryView":1277
* """
* cdef void *tmpdata = NULL
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
*/
__pyx_t_1 = __pyx_v_src.memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1279
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
* cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<<
* cdef bint broadcasting = False
* cdef bint direct_copy = False
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
/* "View.MemoryView":1280
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False # <<<<<<<<<<<<<<
* cdef bint direct_copy = False
* cdef __Pyx_memviewslice tmp
*/
__pyx_v_broadcasting = 0;
/* "View.MemoryView":1281
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False
* cdef bint direct_copy = False # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice tmp
*
*/
__pyx_v_direct_copy = 0;
/* "View.MemoryView":1284
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
__pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1285
*
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<<
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
/* "View.MemoryView":1284
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
goto __pyx_L3;
}
/* "View.MemoryView":1286
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
__pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1287
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<<
*
* cdef int ndim = max(src_ndim, dst_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
/* "View.MemoryView":1286
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
}
__pyx_L3:;
/* "View.MemoryView":1289
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
* cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
__pyx_t_3 = __pyx_v_dst_ndim;
__pyx_t_4 = __pyx_v_src_ndim;
if (((__pyx_t_3 > __pyx_t_4) != 0)) {
__pyx_t_5 = __pyx_t_3;
} else {
__pyx_t_5 = __pyx_t_4;
}
__pyx_v_ndim = __pyx_t_5;
/* "View.MemoryView":1291
* cdef int ndim = max(src_ndim, dst_ndim)
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
*/
__pyx_t_5 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_5;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1292
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1293
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1294
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
* broadcasting = True # <<<<<<<<<<<<<<
* src.strides[i] = 0
* else:
*/
__pyx_v_broadcasting = 1;
/* "View.MemoryView":1295
* if src.shape[i] == 1:
* broadcasting = True
* src.strides[i] = 0 # <<<<<<<<<<<<<<
* else:
* _err_extents(i, dst.shape[i], src.shape[i])
*/
(__pyx_v_src.strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1293
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
goto __pyx_L7;
}
/* "View.MemoryView":1297
* src.strides[i] = 0
* else:
* _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<<
*
* if src.suboffsets[i] >= 0:
*/
/*else*/ {
__pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":1292
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
}
/* "View.MemoryView":1299
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
__pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1300
*
* if src.suboffsets[i] >= 0:
* _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<<
*
* if slices_overlap(&src, &dst, ndim, itemsize):
*/
__pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error)
/* "View.MemoryView":1299
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
}
}
/* "View.MemoryView":1302
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
__pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1304
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
__pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1305
*
* if not slice_is_contig(src, order, ndim):
* order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<<
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
/* "View.MemoryView":1304
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
}
/* "View.MemoryView":1307
* order = get_best_order(&dst, ndim)
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<<
* src = tmp
*
*/
__pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1307, __pyx_L1_error)
__pyx_v_tmpdata = __pyx_t_7;
/* "View.MemoryView":1308
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
* src = tmp # <<<<<<<<<<<<<<
*
* if not broadcasting:
*/
__pyx_v_src = __pyx_v_tmp;
/* "View.MemoryView":1302
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
}
/* "View.MemoryView":1310
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1313
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1314
*
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<<
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim)
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim);
/* "View.MemoryView":1313
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
goto __pyx_L12;
}
/* "View.MemoryView":1315
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1316
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<<
*
* if direct_copy:
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim);
/* "View.MemoryView":1315
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
}
__pyx_L12:;
/* "View.MemoryView":1318
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_2 = (__pyx_v_direct_copy != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1320
* if direct_copy:
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1321
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
*/
(void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)));
/* "View.MemoryView":1322
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
* free(tmpdata)
* return 0
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1323
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1324
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* if order == 'F' == get_best_order(&dst, ndim):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1318
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
}
/* "View.MemoryView":1310
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1326
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = (__pyx_v_order == 'F');
if (__pyx_t_2) {
__pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
}
__pyx_t_8 = (__pyx_t_2 != 0);
if (__pyx_t_8) {
/* "View.MemoryView":1329
*
*
* transpose_memslice(&src) # <<<<<<<<<<<<<<
* transpose_memslice(&dst)
*
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error)
/* "View.MemoryView":1330
*
* transpose_memslice(&src)
* transpose_memslice(&dst) # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error)
/* "View.MemoryView":1326
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1332
* transpose_memslice(&dst)
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1333
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
*/
copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1334
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
* free(tmpdata)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1336
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1337
*
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_broadcast_leading')
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1268
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
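/* A summary of the copy strategy implemented by memoryview_copy_contents
   above:
   1. Broadcast the slice with fewer dimensions by prepending length-1
      leading dimensions (broadcast_leading).
   2. Validate shapes: a mismatched extent is allowed only when the source
      extent is 1, in which case its stride is zeroed so the same element
      repeats; indirect (suboffset) dimensions are rejected via _err_dim.
   3. If the slices overlap in memory, first copy the source into a fresh
      contiguous temporary (copy_data_to_temp) so destination writes
      cannot clobber unread source data.
   4. Fast path: when not broadcasting and both slices are contiguous in
      the same order ('C' with 'C', or 'F' with 'F'), one memcpy suffices.
   5. Otherwise, if both sides prefer Fortran order, transpose both (a
      shape/stride reversal) before the generic element-wise
      copy_strided_to_strided.
   Object-dtype slices bracket every write with refcount_copying so the
   pointers overwritten in dst are released and the copied-in pointers are
   re-acquired. */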
/* "View.MemoryView":1340
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
int __pyx_v_i;
int __pyx_v_offset;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":1344
* int ndim_other) nogil:
* cdef int i
* cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim);
/* "View.MemoryView":1346
* cdef int offset = ndim_other - ndim
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1347
*
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<<
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*/
(__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]);
/* "View.MemoryView":1348
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<<
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
*/
(__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1349
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<<
*
* for i in range(offset):
*/
(__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]);
}
/* "View.MemoryView":1351
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
* for i in range(offset): # <<<<<<<<<<<<<<
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
*/
__pyx_t_1 = __pyx_v_offset;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1352
*
* for i in range(offset):
* mslice.shape[i] = 1 # <<<<<<<<<<<<<<
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1
*/
(__pyx_v_mslice->shape[__pyx_v_i]) = 1;
/* "View.MemoryView":1353
* for i in range(offset):
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<<
* mslice.suboffsets[i] = -1
*
*/
(__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]);
/* "View.MemoryView":1354
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1340
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
/* function exit code */
}
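/* A worked example for broadcast_leading (hypothetical numbers).
   Broadcasting a 1-D slice of shape {4} against a 3-D target gives
   offset = 3 - 1 = 2: the existing dimension is shifted to index 2, and
   the two new leading dimensions become extent 1 with suboffset -1, i.e.
   shape {1, 1, 4}.  Length-1 dimensions are prepended on the left, in the
   style of NumPy leading-dimension broadcasting. */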
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
int __pyx_t_1;
/* "View.MemoryView":1366
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
__pyx_t_1 = (__pyx_v_dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1367
*
* if dtype_is_object:
* refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<<
* dst.strides, ndim, inc)
*
*/
__pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1366
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
}
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
/* function exit code */
}
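/* refcount_copying is the nogil-safe entry point for the refcounting
   pass: for non-object dtypes it is a no-op, and for object dtypes it
   defers to the with-gil variant below, so callers such as
   memoryview_copy_contents can invoke it unconditionally from nogil
   code. */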
/* "View.MemoryView":1371
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
__Pyx_RefNannyDeclarations
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);
/* "View.MemoryView":1374
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
* refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1371
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
/* "View.MemoryView":1377
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
/* "View.MemoryView":1381
* cdef Py_ssize_t i
*
* for i in range(shape[0]): # <<<<<<<<<<<<<<
* if ndim == 1:
* if inc:
*/
__pyx_t_1 = (__pyx_v_shape[0]);
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1382
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
__pyx_t_4 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1383
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
__pyx_t_4 = (__pyx_v_inc != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1384
* if ndim == 1:
* if inc:
* Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* Py_DECREF((<PyObject **> data)[0])
*/
Py_INCREF((((PyObject **)__pyx_v_data)[0]));
/* "View.MemoryView":1383
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":1386
* Py_INCREF((<PyObject **> data)[0])
* else:
* Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
*/
/*else*/ {
Py_DECREF((((PyObject **)__pyx_v_data)[0]));
}
__pyx_L6:;
/* "View.MemoryView":1382
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
goto __pyx_L5;
}
/* "View.MemoryView":1388
* Py_DECREF((<PyObject **> data)[0])
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, inc)
*
*/
/*else*/ {
/* "View.MemoryView":1389
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
* ndim - 1, inc) # <<<<<<<<<<<<<<
*
* data += strides[0]
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
}
__pyx_L5:;
/* "View.MemoryView":1391
* ndim - 1, inc)
*
* data += strides[0] # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0]));
}
/* "View.MemoryView":1377
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
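/* The canonical bracket around a raw copy of an object-dtype slice, as
   used by memoryview_copy_contents above (sketch only, not compiled; src,
   dst and ndim stand for the caller's locals).  inc == 0 drops the
   references held by the pointers about to be overwritten in dst;
   inc == 1 then takes new references for the pointers just copied in. */
#if 0
__pyx_memoryview_refcount_copying (&dst, 1 /* dtype_is_object */, ndim, 0) ;
memcpy (dst.data, src.data, __pyx_memoryview_slice_get_size (&src, ndim)) ;
__pyx_memoryview_refcount_copying (&dst, 1 /* dtype_is_object */, ndim, 1) ;
#endif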
/* "View.MemoryView":1397
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
/* "View.MemoryView":1400
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1401
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False)
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<<
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1403
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
*
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1397
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
/* function exit code */
}
/* "View.MemoryView":1407
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_extent;
int __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
Py_ssize_t __pyx_t_4;
/* "View.MemoryView":1411
* size_t itemsize, void *item) nogil:
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t extent = shape[0]
*
*/
__pyx_v_stride = (__pyx_v_strides[0]);
/* "View.MemoryView":1412
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0]
* cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_extent = (__pyx_v_shape[0]);
/* "View.MemoryView":1414
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1415
*
* if ndim == 1:
* for i in range(extent): # <<<<<<<<<<<<<<
* memcpy(data, item, itemsize)
* data += stride
*/
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1416
* if ndim == 1:
* for i in range(extent):
* memcpy(data, item, itemsize) # <<<<<<<<<<<<<<
* data += stride
* else:
*/
(void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize));
/* "View.MemoryView":1417
* for i in range(extent):
* memcpy(data, item, itemsize)
* data += stride # <<<<<<<<<<<<<<
* else:
* for i in range(extent):
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
/* "View.MemoryView":1414
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
goto __pyx_L3;
}
/* "View.MemoryView":1419
* data += stride
* else:
* for i in range(extent): # <<<<<<<<<<<<<<
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
*/
/*else*/ {
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1420
* else:
* for i in range(extent):
* _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, itemsize, item)
* data += stride
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1422
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
* data += stride # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1407
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/* function exit code */
}
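/* A minimal usage sketch for the scalar-fill pair above (not compiled;
   dst, ndim and the fill value are hypothetical).  slice_assign_scalar
   handles the object-dtype refcount bracket, then _slice_assign_scalar
   memcpy's the item into every element, recursing one dimension at a
   time and advancing by the per-dimension stride. */
#if 0
double value = 3.14 ;
__pyx_memoryview_slice_assign_scalar (&dst, ndim, sizeof (double),
                                      (void *) &value, 0 /* not object */) ;
#endif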
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0);
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<<
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
*/
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
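/* Fast path for calling a bound method: when CYTHON_UNPACK_METHODS is
enabled, unpack the method into its underlying function and self so the
call avoids allocating a temporary bound-method object. (Here the callee
is normally the PickleError class, hence the unlikely() hint.) */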
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 6, __pyx_L1_error)
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
}
/* "(tree fragment)":7
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<<
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
}
/* "(tree fragment)":10
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result # <<<<<<<<<<<<<<
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
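/* As the fragment above shows, __pyx_state is expected to be a tuple of
the form (name,) or (name, dict): slot 0 restores Enum.name and, when
present, slot 1 updates the instance __dict__. */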
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0);
/* "(tree fragment)":12
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->name);
__Pyx_DECREF(__pyx_v___pyx_result->name);
__pyx_v___pyx_result->name = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 13, __pyx_L1_error)
}
__pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_4 = ((__pyx_t_3 > 1) != 0);
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_5 = (__pyx_t_4 != 0);
__pyx_t_2 = __pyx_t_5;
__pyx_L4_bool_binop_done:;
if (__pyx_t_2) {
/* "(tree fragment)":14
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<<
*/
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 14, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
}
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
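/* --- CPython type-object boilerplate ---
The remainder of this section wires the generated classes (array, Enum,
memoryview, _memoryviewslice) into the CPython type system: tp_new and
tp_dealloc slots, GC traverse/clear hooks, method and getset tables, and
the PyTypeObject definitions themselves. */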
static struct __pyx_vtabstruct_array __pyx_vtable_array;
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_array_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_array_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_array;
p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_array(PyObject *o) {
struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
{
/* Save any in-flight exception and temporarily bump the refcount so that
Python code run from __dealloc__ cannot re-trigger deallocation of this
object while it is being torn down. */
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_array___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->mode);
Py_CLEAR(p->_format);
(*Py_TYPE(o)->tp_free)(o);
}
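/* sq_item adapter: converts the Py_ssize_t index into a Python int and
routes it through mp_subscript, so integer indexing and general
subscription share one __getitem__ implementation. */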
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_array___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
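/* tp_getattro: try generic attribute lookup first and fall back to the
class-level __getattr__ only when the generic lookup raised
AttributeError. */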
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n);
if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
v = __pyx_array___getattr__(o, n);
}
return v;
}
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o);
}
static PyMethodDef __pyx_methods_array[] = {
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_array[] = {
{(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_array = {
__pyx_array___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_array, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_array = {
__pyx_array___len__, /*mp_length*/
__pyx_array___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_array = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_array_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_array = {
PyVarObject_HEAD_INIT(0, 0)
"monotonic_align.core.array", /*tp_name*/
sizeof(struct __pyx_array_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_array, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_array, /*tp_as_sequence*/
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
__pyx_tp_getattro_array, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_array, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_array, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_array, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_array, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
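/* Enum holds a reference to an arbitrary 'name' object, so unlike array
it participates in cyclic GC: note Py_TPFLAGS_HAVE_GC in its tp_flags and
the traverse/clear hooks below. */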
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
struct __pyx_MemviewEnum_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_MemviewEnum_obj *)o);
p->name = Py_None; Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_Enum(PyObject *o) {
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->name);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
if (p->name) {
e = (*v)(p->name, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_Enum(PyObject *o) {
PyObject* tmp;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
tmp = ((PyObject*)p->name);
p->name = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
static PyMethodDef __pyx_methods_Enum[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
PyVarObject_HEAD_INIT(0, 0)
"monotonic_align.core.Enum", /*tp_name*/
sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_Enum, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_MemviewEnum___repr__, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_Enum, /*tp_traverse*/
__pyx_tp_clear_Enum, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_Enum, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_MemviewEnum___init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_Enum, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryview_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryview_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_memoryview;
p->obj = Py_None; Py_INCREF(Py_None);
p->_size = Py_None; Py_INCREF(Py_None);
p->_array_interface = Py_None; Py_INCREF(Py_None);
p->view.obj = NULL;
if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
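/* Same exception-save / refcount guard as in __pyx_tp_dealloc_array. */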
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_memoryview___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->obj);
Py_CLEAR(p->_size);
Py_CLEAR(p->_array_interface);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
if (p->obj) {
e = (*v)(p->obj, a); if (e) return e;
}
if (p->_size) {
e = (*v)(p->_size, a); if (e) return e;
}
if (p->_array_interface) {
e = (*v)(p->_array_interface, a); if (e) return e;
}
if (p->view.obj) {
e = (*v)(p->view.obj, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_memoryview(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
tmp = ((PyObject*)p->obj);
p->obj = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_size);
p->_size = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_array_interface);
p->_array_interface = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
Py_CLEAR(p->view.obj);
return 0;
}
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_memoryview___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o);
}
static PyMethodDef __pyx_methods_memoryview[] = {
{"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
{"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
{"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
{"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
{(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0},
{(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0},
{(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0},
{(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0},
{(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0},
{(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0},
{(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0},
{(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0},
{(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
__pyx_memoryview___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_memoryview, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
__pyx_memoryview___len__, /*mp_length*/
__pyx_memoryview___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_memoryview_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_memoryview = {
PyVarObject_HEAD_INIT(0, 0)
"monotonic_align.core.memoryview", /*tp_name*/
sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_memoryview, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_memoryview___repr__, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
&__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
__pyx_memoryview___str__, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_memoryview, /*tp_traverse*/
__pyx_tp_clear_memoryview, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_memoryview, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_memoryview, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_memoryview, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
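/* _memoryviewslice extends memoryview (its tp_base is assigned in
__Pyx_modinit_type_init_code below) and additionally owns the object the
slice was created from plus the C-level slice itself, both of which are
accounted for in its traverse/clear hooks. */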
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryviewslice_obj *p;
PyObject *o = __pyx_tp_new_memoryview(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryviewslice_obj *)o);
p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
p->from_object = Py_None; Py_INCREF(Py_None);
p->from_slice.memview = NULL;
return o;
}
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_memoryviewslice___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->from_object);
PyObject_GC_Track(o);
__pyx_tp_dealloc_memoryview(o);
}
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e;
if (p->from_object) {
e = (*v)(p->from_object, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
__pyx_tp_clear_memoryview(o);
tmp = ((PyObject*)p->from_object);
p->from_object = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
__PYX_XDEC_MEMVIEW(&p->from_slice, 1);
return 0;
}
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o);
}
static PyMethodDef __pyx_methods__memoryviewslice[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
{(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
PyVarObject_HEAD_INIT(0, 0)
"monotonic_align.core._memoryviewslice", /*tp_name*/
sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___repr__, /*tp_repr*/
#else
0, /*tp_repr*/
#endif
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___str__, /*tp_str*/
#else
0, /*tp_str*/
#endif
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
"Internal class for passing memoryview slices to Python", /*tp_doc*/
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
__pyx_tp_clear__memoryviewslice, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods__memoryviewslice, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets__memoryviewslice, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new__memoryviewslice, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
static PyMethodDef __pyx_methods[] = {
{"maximum_path_c", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15monotonic_align_4core_1maximum_path_c, METH_VARARGS|METH_KEYWORDS, 0},
{0, 0, 0, 0}
};
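/* Module definition. With CYTHON_PEP489_MULTI_PHASE_INIT the module is
created through PEP 489 multi-phase initialization (the Py_mod_create /
Py_mod_exec slots); otherwise a classic single-phase module with
m_size = -1 (no per-module state) is used. */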
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_core(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec_core},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"core",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
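/* Interned-string table: each entry maps a C literal to a Python string
object created by __Pyx_InitStrings at import time. By Cython's naming
convention, __pyx_n_s_* entries are interned identifiers, __pyx_kp_s_*
plain string constants, __pyx_n_u_* unicode and __pyx_n_b_* bytes; the
trailing flags select the object kind and whether to intern. */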
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
{&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
{&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
{&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
{&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
{&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0},
{&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
{&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
{&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
{&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
{&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
{&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
{&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
{&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
{&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1},
{&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
{&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
{&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
{&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
{&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
{&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
{&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
{&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
{&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
{&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
{&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
{&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
{&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
{&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
{&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
{&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
{&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
{&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
{&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
{&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
{&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0},
{&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
{&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
{&__pyx_n_s_paths, __pyx_k_paths, sizeof(__pyx_k_paths), 0, 0, 1, 1},
{&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
{&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
{&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
{&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
{&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
{&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
{&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
{&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
{&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
{&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
{&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
{&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
{&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
{&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
{&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
{&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
{&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
{&__pyx_n_s_t_xs, __pyx_k_t_xs, sizeof(__pyx_k_t_xs), 0, 0, 1, 1},
{&__pyx_n_s_t_ys, __pyx_k_t_ys, sizeof(__pyx_k_t_ys), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
{&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
{&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
{&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
{&__pyx_n_s_values, __pyx_k_values, sizeof(__pyx_k_values), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
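/* Builtin lookups are resolved once at import time. The (file, line)
pairs passed to __PYX_ERR refer to the original Cython sources (the .pyx
module and View.MemoryView), not to lines in this C file. */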
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 15, __pyx_L1_error)
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error)
__pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error)
__pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error)
__pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error)
__pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error)
__pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error)
__pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__3);
__Pyx_GIVEREF(__pyx_tuple__3);
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "View.MemoryView":418
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 495, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "View.MemoryView":570
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
/* "View.MemoryView":577
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__pyx_tuple__13 = PyTuple_New(1); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_INCREF(__pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_int_neg_1);
PyTuple_SET_ITEM(__pyx_tuple__13, 0, __pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_tuple__13);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__14);
__Pyx_GIVEREF(__pyx_tuple__14);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__15);
__Pyx_GIVEREF(__pyx_tuple__15);
/* "View.MemoryView":682
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__16);
__Pyx_GIVEREF(__pyx_slice__16);
/* "View.MemoryView":703
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 703, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__17);
__Pyx_GIVEREF(__pyx_tuple__17);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__18);
__Pyx_GIVEREF(__pyx_tuple__18);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__19);
__Pyx_GIVEREF(__pyx_tuple__19);
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__20);
__Pyx_GIVEREF(__pyx_tuple__20);
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__21);
__Pyx_GIVEREF(__pyx_tuple__21);
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__22);
__Pyx_GIVEREF(__pyx_tuple__22);
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__23);
__Pyx_GIVEREF(__pyx_tuple__23);
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__24);
__Pyx_GIVEREF(__pyx_tuple__24);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_tuple__25 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__25);
__Pyx_GIVEREF(__pyx_tuple__25);
__pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
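/* PyEval_InitThreads is only needed on Python < 3.7, where the GIL was
not created during interpreter startup; on newer versions the call is a
no-op and eventually deprecated. The cached ints include 184977713
(0xB068931), the pickle checksum verified in __pyx_unpickle_Enum. */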
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
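/* Type init: fill in the C-level vtables, run PyType_Ready on each
generated type, attach the vtable to the type dict via __Pyx_SetVtable,
and install __reduce__ support via __Pyx_setup_reduce. tp_print is
explicitly zeroed on Python < 3.8.0b1, where that slot still existed. */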
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__pyx_vtabptr_array = &__pyx_vtable_array;
__pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_array.tp_print = 0;
#endif
if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_MemviewEnum.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryview.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryviewslice.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
#ifndef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#elif PY_MAJOR_VERSION < 3
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
#define __Pyx_PyMODINIT_FUNC void
#endif
#else
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
#endif
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initcore(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initcore(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_core(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_core(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
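/* Under PEP 489 multi-phase init the init function only returns the module
   definition; CPython then drives creation and execution through
   __pyx_pymod_create() and __pyx_pymod_exec_core() below, presumably wired
   up via the Py_mod_create/Py_mod_exec slots of __pyx_moduledef earlier in
   this file. */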
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
if (allow_none || value != Py_None) {
result = PyDict_SetItemString(moddict, to_name, value);
}
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
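/* __pyx_pymod_create builds the module object itself so that the import
   spec's loader, origin, parent and submodule_search_locations are copied
   into __loader__, __file__, __package__ and __path__ before any module
   code executes; __path__ is the only attribute for which None is not
   accepted (allow_none == 0). */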
static CYTHON_SMALL_CODE int __pyx_pymod_exec_core(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
static PyThread_type_lock __pyx_t_2[8];
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module 'core' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_core(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
PyEval_InitThreads();
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("core", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_monotonic_align__core) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "monotonic_align.core")) {
if (unlikely(PyDict_SetItemString(modules, "monotonic_align.core", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
(void)__Pyx_modinit_type_import_code();
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "monotonic_align/core.pyx":7
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<<
* cdef int x
* cdef int y
*/
__pyx_k_ = (-1e9);
/* "monotonic_align/core.pyx":1
* cimport cython # <<<<<<<<<<<<<<
* from cython.parallel import prange
*
*/
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":209
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
* def __dealloc__(array self):
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":316
*
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<<
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [
* PyThread_allocate_lock(),
*/
__pyx_memoryview_thread_locks_used = 0;
/* "View.MemoryView":317
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<<
* PyThread_allocate_lock(),
* PyThread_allocate_lock(),
*/
__pyx_t_2[0] = PyThread_allocate_lock();
__pyx_t_2[1] = PyThread_allocate_lock();
__pyx_t_2[2] = PyThread_allocate_lock();
__pyx_t_2[3] = PyThread_allocate_lock();
__pyx_t_2[4] = PyThread_allocate_lock();
__pyx_t_2[5] = PyThread_allocate_lock();
__pyx_t_2[6] = PyThread_allocate_lock();
__pyx_t_2[7] = PyThread_allocate_lock();
memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8));
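/* View.MemoryView preallocates THREAD_LOCKS_PREALLOCATED (= 8) locks up
   front so that most memoryview objects can take one from this pool instead
   of calling PyThread_allocate_lock() on demand;
   __pyx_memoryview_thread_locks_used counts how many are handed out. */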
/* "View.MemoryView":549
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":995
* return self.from_object
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init monotonic_align.core", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init monotonic_align.core");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule(modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, "RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* MemviewSliceInit */
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference)
{
__Pyx_RefNannyDeclarations
int i, retval=-1;
Py_buffer *buf = &memview->view;
__Pyx_RefNannySetupContext("init_memviewslice", 0);
if (unlikely(memviewslice->memview || memviewslice->data)) {
PyErr_SetString(PyExc_ValueError,
"memviewslice is already initialized!");
goto fail;
}
if (buf->strides) {
for (i = 0; i < ndim; i++) {
memviewslice->strides[i] = buf->strides[i];
}
} else {
Py_ssize_t stride = buf->itemsize;
for (i = ndim - 1; i >= 0; i--) {
memviewslice->strides[i] = stride;
stride *= buf->shape[i];
}
}
for (i = 0; i < ndim; i++) {
memviewslice->shape[i] = buf->shape[i];
if (buf->suboffsets) {
memviewslice->suboffsets[i] = buf->suboffsets[i];
} else {
memviewslice->suboffsets[i] = -1;
}
}
memviewslice->memview = memview;
memviewslice->data = (char *)buf->buf;
if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
Py_INCREF(memview);
}
retval = 0;
goto no_fail;
fail:
memviewslice->memview = 0;
memviewslice->data = 0;
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
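/* If the exporter supplies no strides, the buffer is taken to be
   C-contiguous (a NULL strides pointer means exactly that under PEP 3118),
   so strides are rebuilt from itemsize, walking the shape from the last
   dimension outward.  Missing suboffsets are stored as -1, i.e. "no
   indirection" on that axis. */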
#ifndef Py_NO_RETURN
#define Py_NO_RETURN
#endif
static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN {
va_list vargs;
char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, fmt);
#else
va_start(vargs);
#endif
vsnprintf(msg, 200, fmt, vargs);
va_end(vargs);
Py_FatalError(msg);
}
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)++;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)--;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
int first_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (unlikely(!memview || (PyObject *) memview == Py_None))
return;
if (unlikely(__pyx_get_slice_count(memview) < 0))
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
first_time = __pyx_add_acquisition_count(memview) == 0;
if (unlikely(first_time)) {
if (have_gil) {
Py_INCREF((PyObject *) memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_INCREF((PyObject *) memview);
PyGILState_Release(_gilstate);
}
}
}
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
int have_gil, int lineno) {
int last_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (unlikely(!memview || (PyObject *) memview == Py_None)) {
memslice->memview = NULL;
return;
}
if (unlikely(__pyx_get_slice_count(memview) <= 0))
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
last_time = __pyx_sub_acquisition_count(memview) == 1;
memslice->data = NULL;
if (unlikely(last_time)) {
if (have_gil) {
Py_CLEAR(memslice->memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_CLEAR(memslice->memview);
PyGILState_Release(_gilstate);
}
} else {
memslice->memview = NULL;
}
}
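/* Acquisition-count protocol: the first slice acquired from a memoryview
   takes one Python reference to it and the last slice released drops that
   reference, so nogil code can pass slices around while touching only the
   counter.  The GIL is acquired just for the Py_INCREF/Py_CLEAR when the
   caller does not already hold it. */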
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
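/* Keyword matching tries pointer identity first (the generated argument
   name constants are interned, so identity usually suffices), then a length
   check plus a full string comparison.  A keyword naming a parameter
   already bound positionally raises the "multiple values" error; unknown
   keywords go into kwds2 (**kwargs) when present and raise TypeError
   otherwise. */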
/* None */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
/* ArgTypeTest */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
else if (exact) {
#if PY_MAJOR_VERSION == 2
if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(__Pyx_TypeCheck(obj, type))) return 1;
}
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
return 0;
}
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = Py_TYPE(func)->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
int flags = PyCFunction_GET_FLAGS(func);
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
/* _PyCFunction_FastCallDict() must not be called with an exception set,
because it may clear it (directly or indirectly) and so the
caller loses its exception */
assert(!PyErr_Occurred());
if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
} else {
return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
}
}
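/* Dispatches METH_FASTCALL C functions directly.  Before CPython 3.7 (and
   whenever METH_KEYWORDS is set) the slot takes an extra kwnames argument,
   so NULL is passed for it; otherwise the plain fastcall signature is
   used. */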
#endif
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
/* function called with no arguments, but all parameters have
a default value: use default values as arguments. */
args = &PyTuple_GET_ITEM(argdefs, 0);
result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif
#endif
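/* __Pyx_PyFunction_FastCallDict only takes the frame-based fast path when
   the code object is a plain function body (CO_OPTIMIZED | CO_NEWLOCALS |
   CO_NOFREE, no keyword-only parameters) and either every parameter is
   supplied positionally or none is and all have defaults; every other case
   falls through to PyEval_EvalCodeEx with defaults, kwdefs and closure. */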
/* PyObjectCall2Args */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
PyObject *args, *result = NULL;
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyFunction_FastCall(function, args, 2);
}
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyCFunction_FastCall(function, args, 2);
}
#endif
args = PyTuple_New(2);
if (unlikely(!args)) goto done;
Py_INCREF(arg1);
PyTuple_SET_ITEM(args, 0, arg1);
Py_INCREF(arg2);
PyTuple_SET_ITEM(args, 1, arg2);
Py_INCREF(function);
result = __Pyx_PyObject_Call(function, args, NULL);
Py_DECREF(args);
Py_DECREF(function);
done:
return result;
}
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
if (likely(PyCFunction_Check(func))) {
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (__Pyx_PyFastCFunction_Check(func)) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* BytesEquals */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
if (s1 == s2) {
return (equals == Py_EQ);
} else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
const char *ps1, *ps2;
Py_ssize_t length = PyBytes_GET_SIZE(s1);
if (length != PyBytes_GET_SIZE(s2))
return (equals == Py_NE);
ps1 = PyBytes_AS_STRING(s1);
ps2 = PyBytes_AS_STRING(s2);
if (ps1[0] != ps2[0]) {
return (equals == Py_NE);
} else if (length == 1) {
return (equals == Py_EQ);
} else {
int result;
#if CYTHON_USE_UNICODE_INTERNALS
Py_hash_t hash1, hash2;
hash1 = ((PyBytesObject*)s1)->ob_shash;
hash2 = ((PyBytesObject*)s2)->ob_shash;
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
return (equals == Py_NE);
}
#endif
result = memcmp(ps1, ps2, (size_t)length);
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
return (equals == Py_NE);
} else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
return (equals == Py_NE);
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
#endif
}
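/* Cached-hash shortcut: bytes objects memoise their hash in ob_shash, so
   two valid (!= -1) but different hashes prove inequality without a memcmp;
   the first-byte comparison above catches the common mismatch even
   earlier. */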
/* UnicodeEquals */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
PyObject* owned_ref = NULL;
#endif
int s1_is_unicode, s2_is_unicode;
if (s1 == s2) {
goto return_eq;
}
s1_is_unicode = PyUnicode_CheckExact(s1);
s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
owned_ref = PyUnicode_FromObject(s2);
if (unlikely(!owned_ref))
return -1;
s2 = owned_ref;
s2_is_unicode = 1;
} else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
owned_ref = PyUnicode_FromObject(s1);
if (unlikely(!owned_ref))
return -1;
s1 = owned_ref;
s1_is_unicode = 1;
} else if (((!s2_is_unicode) & (!s1_is_unicode))) {
return __Pyx_PyBytes_Equals(s1, s2, equals);
}
#endif
if (s1_is_unicode & s2_is_unicode) {
Py_ssize_t length;
int kind;
void *data1, *data2;
if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
return -1;
length = __Pyx_PyUnicode_GET_LENGTH(s1);
if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
goto return_ne;
}
#if CYTHON_USE_UNICODE_INTERNALS
{
Py_hash_t hash1, hash2;
#if CYTHON_PEP393_ENABLED
hash1 = ((PyASCIIObject*)s1)->hash;
hash2 = ((PyASCIIObject*)s2)->hash;
#else
hash1 = ((PyUnicodeObject*)s1)->hash;
hash2 = ((PyUnicodeObject*)s2)->hash;
#endif
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
goto return_ne;
}
}
#endif
kind = __Pyx_PyUnicode_KIND(s1);
if (kind != __Pyx_PyUnicode_KIND(s2)) {
goto return_ne;
}
data1 = __Pyx_PyUnicode_DATA(s1);
data2 = __Pyx_PyUnicode_DATA(s2);
if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
goto return_ne;
} else if (length == 1) {
goto return_eq;
} else {
int result = memcmp(data1, data2, (size_t)(length * kind));
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & s2_is_unicode) {
goto return_ne;
} else if ((s2 == Py_None) & s1_is_unicode) {
goto return_ne;
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
return_eq:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ);
return_ne:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_NE);
#endif
}
/* None */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
Py_ssize_t q = a / b;
Py_ssize_t r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
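/* C division truncates toward zero, but this helper implements Python's
   floor division: when the remainder is non-zero and r and b have opposite
   signs ((r ^ b) < 0), the truncated quotient is one too large.  Worked
   example: a = -7, b = 2 gives q = -3, r = -1; (r ^ b) < 0, so q is
   corrected to -4, matching -7 // 2 in Python. */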
/* GetAttr */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_USE_TYPE_SLOTS
#if PY_MAJOR_VERSION >= 3
if (likely(PyUnicode_Check(n)))
#else
if (likely(PyString_Check(n)))
#endif
return __Pyx_PyObject_GetAttrStr(o, n);
#endif
return PyObject_GetAttr(o, n);
}
/* GetItemInt */
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyList_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyTuple_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* ObjectGetItem */
#if CYTHON_USE_TYPE_SLOTS
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
PyObject *runerr;
Py_ssize_t key_value;
PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
if (unlikely(!(m && m->sq_item))) {
PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
return NULL;
}
key_value = __Pyx_PyIndex_AsSsize_t(index);
if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
}
if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
PyErr_Clear();
PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
}
return NULL;
}
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
if (likely(m && m->mp_subscript)) {
return m->mp_subscript(obj, key);
}
return __Pyx_PyObject_GetIndex(obj, key);
}
#endif
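/* __Pyx_PyObject_GetItem prefers the mapping slot (mp_subscript) and only
   falls back to the sequence slot through an index conversion; an index too
   large for Py_ssize_t is reported as IndexError, matching what CPython
   does for sequence subscripts. */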
/* decode_c_string */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
Py_ssize_t length;
if (unlikely((start < 0) | (stop < 0))) {
size_t slen = strlen(cstring);
if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
PyErr_SetString(PyExc_OverflowError,
"c-string too long to convert to Python");
return NULL;
}
length = (Py_ssize_t) slen;
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
}
if (unlikely(stop <= start))
return __Pyx_NewRef(__pyx_empty_unicode);
length = stop - start;
cstring += start;
if (decode_func) {
return decode_func(cstring, length, errors);
} else {
return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
if (unlikely(PyTuple_Check(err)))
return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetAttr3 */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
return NULL;
__Pyx_PyErr_Clear();
Py_INCREF(d);
return d;
}
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
PyObject *r = __Pyx_GetAttr(o, n);
return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
}
/* PyDictVersioning */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
/* GetModuleGlobalName */
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
} else if (unlikely(PyErr_Occurred())) {
return NULL;
}
#else
result = PyDict_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
#endif
#else
result = PyObject_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
PyErr_Clear();
#endif
return __Pyx_GetBuiltinName(name);
}
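/* Module-global lookup order: the module dict first (using the known-hash
   lookup and, when available, the PEP 509 dict version tag as a cache key),
   then the builtins via __Pyx_GetBuiltinName(), which raises NameError if
   both miss. */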
/* RaiseTooManyValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(__Pyx_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
_PyErr_StackItem *exc_info = tstate->exc_info;
while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
exc_info->previous_item != NULL)
{
exc_info = exc_info->previous_item;
}
return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
*type = exc_info->exc_type;
*value = exc_info->exc_value;
*tb = exc_info->exc_traceback;
#else
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
#endif
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = type;
exc_info->exc_value = value;
exc_info->exc_traceback = tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
/* GetException */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if CYTHON_USE_EXC_INFO_STACK
{
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = local_type;
exc_info->exc_value = local_value;
exc_info->exc_traceback = local_tb;
}
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
/* SwapException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = *type;
exc_info->exc_value = *value;
exc_info->exc_traceback = *tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = *type;
tstate->exc_value = *value;
tstate->exc_traceback = *tb;
#endif
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
PyErr_SetExcInfo(*type, *value, *tb);
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#endif
/* Import */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
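/* Note on `level`: -1 requests Py2-style implicit relative imports. On
   Python 3 the code above emulates that by first attempting a level-1
   relative import when this module lives inside a package (its dotted
   name contains '.'), swallowing ImportError, and then retrying as an
   absolute (level 0) import. */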
/* FastTypeChecks */
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
} else {
}
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
} else {
}
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
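/* These fast checks walk the type's tp_mro tuple (or the tp_base chain
   when no MRO has been computed yet) instead of calling back into the
   Python-level __subclasscheck__ protocol. That is valid here because
   exception classes are ordinary types whose MRO fully describes their
   inheritance, and it makes `except` clause matching considerably
   cheaper than PyErr_GivenExceptionMatches on hot paths. */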
/* PyIntBinop */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
(void)inplace;
(void)zerodivision_check;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(op1))) {
const long b = intval;
long x;
long a = PyInt_AS_LONG(op1);
x = (long)((unsigned long)a + b);
if (likely((x^a) >= 0 || (x^b) >= 0))
return PyInt_FromLong(x);
return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(op1))) {
const long b = intval;
long a, x;
#ifdef HAVE_LONG_LONG
const PY_LONG_LONG llb = intval;
PY_LONG_LONG lla, llx;
#endif
const digit* digits = ((PyLongObject*)op1)->ob_digit;
const Py_ssize_t size = Py_SIZE(op1);
if (likely(__Pyx_sst_abs(size) <= 1)) {
a = likely(size) ? digits[0] : 0;
if (size == -1) a = -a;
} else {
switch (size) {
case -2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
}
x = a + b;
return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
long_long:
llx = lla + llb;
return PyLong_FromLongLong(llx);
#endif
}
#endif
if (PyFloat_CheckExact(op1)) {
const long b = intval;
double a = PyFloat_AS_DOUBLE(op1);
double result;
PyFPE_START_PROTECT("add", return NULL)
result = ((double)a) + (double)b;
PyFPE_END_PROTECT(result)
return PyFloat_FromDouble(result);
}
return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
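/* Overflow note for the fast paths above: x = a + b is computed through
   unsigned arithmetic (well-defined wraparound), and the result is kept
   only if (x^a) >= 0 || (x^b) >= 0, i.e. the sum still agrees in sign
   with at least one operand. Signed addition can only overflow when both
   operands share a sign and the wrapped result has the opposite sign;
   e.g. with a 32-bit long, a = 0x7fffffff, b = 1 wraps to a negative x,
   both XORs are negative, and the code falls back to PyLong addition. */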
/* None */
static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
long q = a / b;
long r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
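/* Worked example: C division truncates toward zero while Python's //
   floors, so the quotient is decremented exactly when the remainder is
   nonzero and has the opposite sign from b ((r ^ b) < 0). For a = -7,
   b = 2: q = -3, r = -1; the correction fires and q becomes -4, matching
   Python's -7 // 2. */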
/* ImportFrom */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
"cannot import name %.230s", PyString_AS_STRING(name));
#else
"cannot import name %S", name);
#endif
}
return value;
}
/* HasAttr */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
PyObject *r;
if (unlikely(!__Pyx_PyBaseString_Check(n))) {
PyErr_SetString(PyExc_TypeError,
"hasattr(): attribute name must be string");
return -1;
}
r = __Pyx_GetAttr(o, n);
if (unlikely(!r)) {
PyErr_Clear();
return 0;
} else {
Py_DECREF(r);
return 1;
}
}
/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
"'%.50s' object has no attribute '%U'",
tp->tp_name, attr_name);
#else
"'%.50s' object has no attribute '%.400s'",
tp->tp_name, PyString_AS_STRING(attr_name));
#endif
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
PyObject *descr;
PyTypeObject *tp = Py_TYPE(obj);
if (unlikely(!PyString_Check(attr_name))) {
return PyObject_GenericGetAttr(obj, attr_name);
}
assert(!tp->tp_dictoffset);
descr = _PyType_Lookup(tp, attr_name);
if (unlikely(!descr)) {
return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
}
Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
{
descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
if (unlikely(f)) {
PyObject *res = f(descr, obj, (PyObject *)tp);
Py_DECREF(descr);
return res;
}
}
return descr;
}
#endif
/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
return PyObject_GenericGetAttr(obj, attr_name);
}
return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif
/* SetVTable */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
if (!ob)
goto bad;
if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
goto bad;
Py_DECREF(ob);
return 0;
bad:
Py_XDECREF(ob);
return -1;
}
/* PyObjectGetAttrStrNoError */
static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
__Pyx_PyErr_Clear();
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
}
#endif
result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
if (unlikely(!result)) {
__Pyx_PyObject_GetAttrStr_ClearAttributeError();
}
return result;
}
/* SetupReduce */
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
int ret;
PyObject *name_attr;
name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2);
if (likely(name_attr)) {
ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
} else {
ret = -1;
}
if (unlikely(ret < 0)) {
PyErr_Clear();
ret = 0;
}
Py_XDECREF(name_attr);
return ret;
}
static int __Pyx_setup_reduce(PyObject* type_obj) {
int ret = 0;
PyObject *object_reduce = NULL;
PyObject *object_reduce_ex = NULL;
PyObject *reduce = NULL;
PyObject *reduce_ex = NULL;
PyObject *reduce_cython = NULL;
PyObject *setstate = NULL;
PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#else
if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#else
object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#endif
reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#else
object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#endif
reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython);
if (likely(reduce_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (reduce == object_reduce || PyErr_Occurred()) {
goto __PYX_BAD;
}
setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
if (!setstate) PyErr_Clear();
if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython);
if (likely(setstate_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (!setstate || PyErr_Occurred()) {
goto __PYX_BAD;
}
}
PyType_Modified((PyTypeObject*)type_obj);
}
}
goto __PYX_GOOD;
__PYX_BAD:
if (!PyErr_Occurred())
PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
ret = -1;
__PYX_GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
Py_XDECREF(object_reduce);
Py_XDECREF(object_reduce_ex);
#endif
Py_XDECREF(reduce);
Py_XDECREF(reduce_ex);
Py_XDECREF(reduce_cython);
Py_XDECREF(setstate);
Py_XDECREF(setstate_cython);
return ret;
}
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!__pyx_cython_runtime)) {
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
use_cline, *cython_runtime_dict,
__Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
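/* Binary search over entries sorted by code_line: returns the index of a
   matching entry, or the position at which code_line should be inserted.
   The early test against the last entry makes the common case of
   monotonically increasing line numbers a single comparison. */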
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
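/* Cache-key note: C line numbers are stored negated (-c_line) so that
   entries keyed by C lines and entries keyed by Python lines can share
   the same sorted code-object cache without colliding. */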
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if ((0)) {}
view->obj = NULL;
Py_DECREF(obj);
}
#endif
/* MemviewSliceIsContig */
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim)
{
int i, index, step, start;
Py_ssize_t itemsize = mvs.memview->view.itemsize;
if (order == 'F') {
step = 1;
start = 0;
} else {
step = -1;
start = ndim - 1;
}
for (i = 0; i < ndim; i++) {
index = start + step * i;
if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
return 0;
itemsize *= mvs.shape[index];
}
return 1;
}
/* OverlappingSlices */
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
void **out_start, void **out_end,
int ndim, size_t itemsize)
{
char *start, *end;
int i;
start = end = slice->data;
for (i = 0; i < ndim; i++) {
Py_ssize_t stride = slice->strides[i];
Py_ssize_t extent = slice->shape[i];
if (extent == 0) {
*out_start = *out_end = start;
return;
} else {
if (stride > 0)
end += stride * (extent - 1);
else
start += stride * (extent - 1);
}
}
*out_start = start;
*out_end = end + itemsize;
}
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize)
{
void *start1, *end1, *start2, *end2;
__pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
__pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
return (start1 < end2) && (start2 < end1);
}
/* Capsule */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
cobj = PyCapsule_New(p, sig, NULL);
#else
cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
return cobj;
}
/* IsLittleEndian */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
union {
uint32_t u32;
uint8_t u8[4];
} S;
S.u32 = 0x01020304;
return S.u8[0] == 4;
}
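/* The union trick: 0x01020304 is written as a 32-bit integer and the
   first byte is read back. A little-endian machine stores the least
   significant byte first, so u8[0] is 4 and the function returns 1; on
   a big-endian machine u8[0] is 1 and it returns 0. */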
/* BufferFormatCheck */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
PyErr_Format(PyExc_ValueError,
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case '?': return "'bool'";
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct to align
on the first member of the struct. This will probably be the same as above,
but we don't have any guarantees.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
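/* Alignment/padding trick used above: on common ABIs,
   sizeof(struct { char c; T x; }) - sizeof(T) equals the alignment
   requirement of T, because the compiler pads after `c` to align `x`
   (e.g. __Pyx_st_double is typically 16 bytes, giving 8 for double).
   The __Pyx_pad_* variants measure the same quantity from the other
   direction: the padding inserted after a trailing char to round the
   struct up to T's alignment, which is usually, but not guaranteed to
   be, the same value. */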
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case '?': case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number, ndim;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ndim = ctx->head->field->type->ndim;
while (*ts && *ts != ')') {
switch (*ts) {
case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue; /* skip whitespace; without ++ts this would loop forever */
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
CYTHON_FALLTHROUGH;
case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) &&
(ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
CYTHON_FALLTHROUGH;
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
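/* Parser note: this is a small state machine over the PEP 3118 struct
   syntax. Repeat counts accumulate in new_count/enc_count, and each run
   of identical type codes is flushed through __Pyx_BufFmt_ProcessTypeChunk,
   which advances through the expected __Pyx_TypeInfo tree in lock-step.
   For example, a C struct { int a; float b; } typically arrives as the
   format string "T{i:a:f:b:}"; the ':' case above skips the field names. */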
/* TypeInfoCompare */
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
int i;
if (!a || !b)
return 0;
if (a == b)
return 1;
if (a->size != b->size || a->typegroup != b->typegroup ||
a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
if (a->typegroup == 'H' || b->typegroup == 'H') {
return a->size == b->size;
} else {
return 0;
}
}
if (a->ndim) {
for (i = 0; i < a->ndim; i++)
if (a->arraysize[i] != b->arraysize[i])
return 0;
}
if (a->typegroup == 'S') {
if (a->flags != b->flags)
return 0;
if (a->fields || b->fields) {
if (!(a->fields && b->fields))
return 0;
for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
__Pyx_StructField *field_a = a->fields + i;
__Pyx_StructField *field_b = b->fields + i;
if (field_a->offset != field_b->offset ||
!__pyx_typeinfo_cmp(field_a->type, field_b->type))
return 0;
}
return !a->fields[i].type && !b->fields[i].type;
}
}
return 1;
}
/* MemviewSliceValidateAndInit */
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
if (buf->shape[dim] <= 1)
return 1;
if (buf->strides) {
if (spec & __Pyx_MEMVIEW_CONTIG) {
if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
if (unlikely(buf->strides[dim] != sizeof(void *))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly contiguous "
"in dimension %d.", dim);
goto fail;
}
} else if (unlikely(buf->strides[dim] != buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_FOLLOW) {
Py_ssize_t stride = buf->strides[dim];
if (stride < 0)
stride = -stride;
if (unlikely(stride < buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
} else {
if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not contiguous in "
"dimension %d", dim);
goto fail;
} else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not indirect in "
"dimension %d", dim);
goto fail;
} else if (unlikely(buf->suboffsets)) {
PyErr_SetString(PyExc_ValueError,
"Buffer exposes suboffsets but no strides");
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
if (spec & __Pyx_MEMVIEW_DIRECT) {
if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) {
PyErr_Format(PyExc_ValueError,
"Buffer not compatible with direct access "
"in dimension %d.", dim);
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_PTR) {
if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly accessible "
"in dimension %d.", dim);
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
int i;
if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
Py_ssize_t stride = 1;
for (i = 0; i < ndim; i++) {
if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not fortran contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
} else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
Py_ssize_t stride = 1;
for (i = ndim - 1; i > -1; i--) {
if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not C contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
}
return 1;
fail:
return 0;
}
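/* Contiguity check example: for a C-contiguous slice the expected stride
   of each dimension, scanned from last to first, is the running product
   of the item size and the trailing extents (dimensions of extent <= 1
   are exempt). A 3x4 slice of 8-byte items must therefore have strides
   {32, 8}; the F-contiguous branch applies the mirror-image rule
   starting from the first dimension. */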
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj)
{
struct __pyx_memoryview_obj *memview, *new_memview;
__Pyx_RefNannyDeclarations
Py_buffer *buf;
int i, spec = 0, retval = -1;
__Pyx_BufFmt_Context ctx;
int from_memoryview = __pyx_memoryview_check(original_obj);
__Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
original_obj)->typeinfo)) {
memview = (struct __pyx_memoryview_obj *) original_obj;
new_memview = NULL;
} else {
memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
original_obj, buf_flags, 0, dtype);
new_memview = memview;
if (unlikely(!memview))
goto fail;
}
buf = &memview->view;
if (unlikely(buf->ndim != ndim)) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
ndim, buf->ndim);
goto fail;
}
if (new_memview) {
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail;
}
if (unlikely((unsigned) buf->itemsize != dtype->size)) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
"does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
buf->itemsize,
(buf->itemsize > 1) ? "s" : "",
dtype->name,
dtype->size,
(dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->len > 0) {
for (i = 0; i < ndim; i++) {
spec = axes_specs[i];
if (unlikely(!__pyx_check_strides(buf, i, ndim, spec)))
goto fail;
if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec)))
goto fail;
}
if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)))
goto fail;
}
if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
new_memview != NULL) == -1)) {
goto fail;
}
retval = 0;
goto no_fail;
fail:
Py_XDECREF(new_memview);
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3,
&__Pyx_TypeInfo_int, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3,
&__Pyx_TypeInfo_float, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1,
&__Pyx_TypeInfo_int, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* CIntFromPyVerify */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
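/* The round-trip cast `value != (func_type)(target_type)value` detects
   truncation whenever the target type is narrower than the fetch type;
   the _EXC variant additionally distinguishes a genuine -1 result from
   CPython's -1 error sentinel by consulting PyErr_Occurred() first. */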
/* MemviewSliceCopyTemplate */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object)
{
__Pyx_RefNannyDeclarations
int i;
__Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
Py_buffer *buf = &from_memview->view;
PyObject *shape_tuple = NULL;
PyObject *temp_int = NULL;
struct __pyx_array_obj *array_obj = NULL;
struct __pyx_memoryview_obj *memview_obj = NULL;
__Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
for (i = 0; i < ndim; i++) {
if (unlikely(from_mvs->suboffsets[i] >= 0)) {
PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
"indirect dimensions (axis %d)", i);
goto fail;
}
}
shape_tuple = PyTuple_New(ndim);
if (unlikely(!shape_tuple)) {
goto fail;
}
__Pyx_GOTREF(shape_tuple);
for(i = 0; i < ndim; i++) {
temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
if(unlikely(!temp_int)) {
goto fail;
} else {
PyTuple_SET_ITEM(shape_tuple, i, temp_int);
temp_int = NULL;
}
}
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
if (unlikely(!array_obj)) {
goto fail;
}
__Pyx_GOTREF(array_obj);
memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
(PyObject *) array_obj, contig_flag,
dtype_is_object,
from_mvs->memview->typeinfo);
if (unlikely(!memview_obj))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
goto fail;
if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
dtype_is_object) < 0))
goto fail;
goto no_fail;
fail:
__Pyx_XDECREF(new_mvs.memview);
new_mvs.memview = NULL;
new_mvs.data = NULL;
no_fail:
__Pyx_XDECREF(shape_tuple);
__Pyx_XDECREF(temp_int);
__Pyx_XDECREF(array_obj);
__Pyx_RefNannyFinishContext();
return new_mvs;
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
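/* All sizeof() comparisons above are compile-time constants, so each
   instantiation of this template keeps exactly one live branch (the same
   reason the `long` variant below contains tautologies such as
   sizeof(long) < sizeof(long)). The `neg_one > const_zero` idiom detects
   unsigned typedefs without triggering -Wconversion, and the closing
   block handles exotic integer widths by serializing the raw bytes
   through _PyLong_FromByteArray. */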
/* CIntFromPy */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* CIntFromPy */
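/* CPython stores the magnitude of a long little-endian in ob_digit[], in
   base 2**PyLong_SHIFT (15 or 30 bits per digit), with the sign and digit
   count folded into Py_SIZE().  The switch statements below unroll the
   1..4-digit cases so that small values convert without a library call. */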
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* CIntFromPy */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const char neg_one = (char) -1, const_zero = (char) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(char) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (char) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0])
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) {
return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) {
return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) {
return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (char) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(char) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0])
case -2:
if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
}
#endif
if (sizeof(char) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
char val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (char) -1;
}
} else {
char val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (char) -1;
val = __Pyx_PyInt_As_char(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to char");
return (char) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to char");
return (char) -1;
}
/* CheckBinaryVersion */
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
int retval;
if (unlikely(!x)) return -1;
retval = __Pyx_PyObject_IsTrue(x);
Py_DECREF(x);
return retval;
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
mandeltr.c | #include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#include<omp.h>
#define IND(a,i) *((double *)((a)->data+(i)*(a)->strides[0]))
static PyObject *mandeltr(PyObject *self, PyObject *args, PyObject *keywd);
static PyObject *mandeltr(PyObject *self, PyObject *args, PyObject *keywd)
{
PyObject *etc;
PyArrayObject *t, *y, *params;
npy_intp dims[1];
static char *kwlist[] = {"params","t","etc",NULL};
if(!PyArg_ParseTupleAndKeywords(args,keywd,"OO|O",kwlist, \
&params,&t,&etc))
{
return NULL;
}
double midpt,rprs,cosi,ars,flux;
double p,z,k0,k1,mod;
midpt = IND(params,0);
rprs = IND(params,1);
cosi = IND(params,2);
ars = IND(params,3);
flux = IND(params,4);
p = IND(params,5);
dims[0] = t->dimensions[0];
y = (PyArrayObject *) PyArray_SimpleNew(1,dims,PyArray_DOUBLE);
int i;
#pragma omp parallel for private(z,k0,k1,mod)
for(i=0;i<dims[0];i++)
{
IND(y,i) = 1;
mod = (IND(t,i)-midpt)-floor((IND(t,i)-midpt)/p)*p;
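/* Fold time onto orbital phase. During the half-orbit centred on secondary
   eclipse the planet is behind the star, so pin z to a/R* (far from the
   stellar disc) and no spurious transit is produced by the sine term. */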
if((mod > p/4.) && (mod < 3*p/4.))
{
z = ars;
}
else
{
z = ars*sqrt(pow(sin(2*M_PI*(IND(t,i)-midpt)/p),2)+pow((cosi*cos(2*M_PI \
*(IND(t,i)-midpt)/p)),2));
}
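/* Mandel & Agol (2002) uniform-source cases, with z the sky-projected
   planet-star separation in stellar radii: a full transit (z <= 1-rprs)
   blocks the area ratio rprs^2; ingress/egress (1-rprs < z <= 1+rprs)
   blocks the lens-shaped overlap area; otherwise the flux stays at 1. */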
if(z<=(1-rprs))
{
IND(y,i) = 1-pow(rprs,2);
}
if(z>(1-rprs)&&z<=(1+rprs))
{
k0 = acos((rprs*rprs+z*z-1)/2/rprs/z);
k1 = acos((1-rprs*rprs+z*z)/2/z);
IND(y,i) = 1-1/M_PI*(k0*rprs*rprs+k1-sqrt((4*z*z-\
pow((1+z*z-rprs*rprs),2))/4));
}
IND(y,i) *= flux;
}
return PyArray_Return(y);
}
static char module_docstring[] ="This function computes the primary transit shape using equations provided by Mandel & Agol (2002)\n\
\n\
Parameters\n\
----------\n\
midpt: Center of eclipse\n\
rprs: Planet radius / stellar radius\n\
cosi: Cosine of the inclination\n\
ars: Semi-major axis / stellar radius\n\
flux: Flux offset from 0\n\
t: Array of phase/time points\n\
p: Period in same units as t\n\
\n\
Returns\n\
-------\n\
This function returns the flux for each point in t.\n\
\n\
Revisions\n\
---------\n\
2010-11-27 Kevin Stevenson, UCF\n\
kevin218@knights.ucf.edu\n\
Original version\n\
2010-12-19 Nate Lust, UCF\n\
natelust at linux dot com\n\
converted function to c\n\
2018-11-22 Jonathan Fraine, SSI\n\
jfraine at spacescience.org\n\
Updated c extensions to python3, with support for python2.7\n\
";
static PyMethodDef module_methods[]={
{"mandeltr",mandeltr,METH_VARARGS|METH_KEYWORDS,module_docstring}, {NULL}};
PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_mandeltr(void)
#else
initmandeltr(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
PyObject *module;
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"mandeltr", /* m_name */
module_docstring, /* m_doc */
-1, /* m_size */
module_methods, /* m_methods */
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL, /* m_free */
};
#endif
#if PY_MAJOR_VERSION >= 3
module = PyModule_Create(&moduledef);
if (!module)
return NULL;
/* Load `numpy` functionality. */
import_array();
return module;
#else
PyObject *m = Py_InitModule3("mandeltr", module_methods, module_docstring);
if (m == NULL)
return;
/* Load `numpy` functionality. */
import_array();
#endif
}
|
clause-1.c | /* { dg-do compile } */
/* { dg-require-effective-target tls } */
#define p parallel
extern void bar (void);
extern char q[];
int t;
#pragma omp threadprivate (t)
void
foo (int x)
{
char *pp;
struct S { int i; int j; } s;
char a[32];
double d;
int i;
const int c = 8;
#pragma omp p shared (x, x) /* { dg-error "more than once" } */
;
#pragma omp p private (x) private (x) /* { dg-error "more than once" } */
;
#pragma omp p shared (x) firstprivate (x) /* { dg-error "more than once" } */
;
#pragma omp p firstprivate (x, x) /* { dg-error "more than once" } */
;
#pragma omp p for shared (x) lastprivate (x) /* { dg-error "more than" } */
for (i = 0; i < 10; i++)
;
#pragma omp p for private (x) lastprivate (x) /* { dg-error "more than" } */
for (i = 0; i < 10; i++)
;
#pragma omp p for lastprivate (x, x) /* { dg-error "more than once" } */
for (i = 0; i < 10; i++)
;
#pragma omp p for linear (x, x) /* { dg-error "more than once" } */
for (i = 0; i < 10; i++)
;
#pragma omp single private (x) copyprivate (x) /* { dg-error "more than" } */
;
#pragma omp p shared (bar) /* { dg-error "is not a variable" } */
;
#pragma omp p private (bar) /* { dg-error "is not a variable" } */
;
#pragma omp p firstprivate (bar) /* { dg-error "is not a variable" } */
;
#pragma omp p reduction (+:pp) /* { dg-error "user defined reduction not found for" } */
;
#pragma omp p reduction (*:s) /* { dg-error "user defined reduction not found for" } */
;
#pragma omp p reduction (-:a)
;
d = 0;
#pragma omp p reduction (*:d)
;
#pragma omp p reduction (|:d) /* { dg-error "has invalid type for" } */
;
#pragma omp p reduction (&&:d) /* { dg-error "has invalid type for" } */
;
#pragma omp p copyin (d) /* { dg-error "must be 'threadprivate'" } */
;
#pragma omp p copyin (x) /* { dg-error "must be 'threadprivate'" } */
;
#pragma omp p for firstprivate (x) lastprivate (x)
for (i = 0; i < 10; i++)
;
#pragma omp p private (q) /* { dg-error "incomplete type" } */
;
#pragma omp p firstprivate (q) /* { dg-error "incomplete type" } */
;
#pragma omp p for lastprivate (q) /* { dg-error "incomplete type" } */
for (i = 0; i < 10; i++)
;
#pragma omp p shared (t) /* { dg-error "predetermined 'threadprivate'" } */
;
#pragma omp p private (t) /* { dg-error "predetermined 'threadprivate'" } */
;
#pragma omp p firstprivate (t) /* { dg-error "predetermined 'threadpriv" } */
;
#pragma omp p for lastprivate (t) /* { dg-error "predetermined 'threadpr" } */
for (i = 0; i < 10; i++)
;
#pragma omp p reduction (*:t) /* { dg-error "predetermined 'threadprivate" } */
;
#pragma omp p for linear (t) /* { dg-error "predetermined 'threadprivate" } */
for (i = 0; i < 10; i++)
;
#pragma omp p shared (c) /* { dg-error "predetermined 'shared'" } */
;
#pragma omp p private (c) /* { dg-error "predetermined 'shared'" } */
;
#pragma omp p firstprivate (c)
;
#pragma omp p for lastprivate (c) /* { dg-error "predetermined 'shared'" } */
for (i = 0; i < 10; i++)
;
#pragma omp p reduction (*:c) /* { dg-error "predetermined 'shared'" } */
;
#pragma omp p for linear (c) /* { dg-error "predetermined 'shared'" } */
for (i = 0; i < 10; i++)
;
}
|
grid.c |
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <float.h>
#include <fcntl.h>
#include <stdint.h>
#include <complex.h>
#include <fftw3.h>
#include <omp.h>
#include <fenv.h>
#ifdef __AVX2__
#include <immintrin.h>
#endif
#include "grid.h"
// Assume all uvw are zero. This eliminates all coordinate
// calculations and grids only into the middle.
//#define ASSUME_UVW_0
inline static int coord(int grid_size, double theta,
struct bl_data *bl_data,
int time, int freq) {
#ifdef ASSUME_UVW_0
int x = 0, y = 0;
#else
int x = (int)floor(theta * uvw_lambda(bl_data, time, freq, 0) + .5);
int y = (int)floor(theta * uvw_lambda(bl_data, time, freq, 1) + .5);
#endif
return (y+grid_size/2) * grid_size + (x+grid_size/2);
}
inline static void _frac_coord(int grid_size, int oversample,
double u, int *x, int *fx) {
// Round to nearest oversampling value. We assume the kernel to be
// in "natural" order, so what we are looking for is the best
// "x - fx / oversample"
// approximation for "grid_size/2 + u".
fesetround(FE_TONEAREST); // lrint() below honours the current rounding mode
int ox = lrint((grid_size / 2 - u) * oversample);
*x = grid_size-(ox / oversample);
*fx = ox % oversample;
}
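// Worked example: grid_size = 8, oversample = 4, u = -1.3 gives
// ox = lrint((4 + 1.3) * 4) = 21, hence x = 8 - 21/4 = 3, fx = 21 % 4 = 1,
// and x - fx/oversample = 2.75, the closest quarter-step to
// grid_size/2 + u = 2.7.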
void frac_coord(int grid_size, int oversample,
double u, int *x, int *fx) {
_frac_coord(grid_size, oversample, u, x, fx);
}
// Fractional coordinate calculation for oversampled 2D kernel
inline static void frac_coord_2d(int grid_size, int kernel_size, int oversample,
double theta,
struct bl_data *bl_data,
int time, int freq,
double d_u, double d_v,
int *grid_offset, int *sub_offset) {
// Convert UVW coordinates into grid coordinates. This needs to
// take frequency scaling, the field of view size (= grid
// resolution) as well as the grid centre into account
#ifdef ASSUME_UVW_0
double x = 0, y = 0;
#else
double x = theta * (uvw_lambda(bl_data, time, freq, 0) - d_u);
double y = theta * (uvw_lambda(bl_data, time, freq, 1) - d_v);
#endif
// Find fractional coordinates
int ix, iy, ixf, iyf;
_frac_coord(grid_size, oversample, x, &ix, &ixf);
_frac_coord(grid_size, oversample, y, &iy, &iyf);
// Calculate grid and oversampled kernel offsets
*grid_offset = (iy-kernel_size/2)*grid_size + (ix-kernel_size/2);
*sub_offset = kernel_size * kernel_size * (iyf*oversample + ixf);
}
// Fractional coordinate calculation for separable 1D kernel
inline static void frac_coord_sep_uv(int grid_size, int kernel_size, int oversample,
double theta,
double u, double v,
int *grid_offset,
int *sub_offset_x, int *sub_offset_y)
{
double x = theta * u, y = theta * v;
// Find fractional coordinates
int ix, iy, ixf, iyf;
_frac_coord(grid_size, oversample, x, &ix, &ixf);
_frac_coord(grid_size, oversample, y, &iy, &iyf);
// Calculate grid and oversampled kernel offsets
*grid_offset = (iy-kernel_size/2)*grid_size + (ix-kernel_size/2);
*sub_offset_x = kernel_size * ixf;
*sub_offset_y = kernel_size * iyf;
}
inline static void frac_coord_sep(int grid_size, int kernel_size, int oversample,
double theta,
struct bl_data *bl_data,
int time, int freq,
double d_u, double d_v,
int *grid_offset,
int *sub_offset_x, int *sub_offset_y)
{
#ifdef ASSUME_UVW_0
double u = 0, v = 0;
#else
double u = uvw_lambda(bl_data, time, freq, 0) - d_u;
double v = uvw_lambda(bl_data, time, freq, 1) - d_v;
#endif
frac_coord_sep_uv(grid_size, kernel_size, oversample, theta, u, v,
grid_offset, sub_offset_x, sub_offset_y);
}
void weight(unsigned int *wgrid, int grid_size, double theta,
struct vis_data *vis) {
// Simple uniform weighting
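// Two passes: first count how many visibilities land in each grid cell,
// then divide every visibility by its cell's count, so each occupied
// cell contributes unit total weight.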
int bl, time, freq;
memset(wgrid, 0, grid_size * grid_size * sizeof(unsigned int));
for (bl = 0; bl < vis->bl_count; bl++) {
for (time = 0; time < vis->bl[bl].time_count; time++) {
for (freq = 0; freq < vis->bl[bl].freq_count; freq++) {
wgrid[coord(grid_size, theta, &vis->bl[bl], time, freq)]++;
}
}
}
for (bl = 0; bl < vis->bl_count; bl++) {
for (time = 0; time < vis->bl[bl].time_count; time++) {
for (freq = 0; freq < vis->bl[bl].freq_count; freq++) {
vis->bl[bl].vis[time*vis->bl[bl].freq_count + freq]
/= wgrid[coord(grid_size, theta, &vis->bl[bl], time, freq)];
}
}
}
}
uint64_t degrid_simple(double complex *uvgrid, int grid_size, double theta,
struct vis_data *vis) {
uint64_t flops = 0;
int bl, time, freq;
for (bl = 0; bl < vis->bl_count; bl++) {
for (time = 0; time < vis->bl[bl].time_count; time++) {
for (freq = 0; freq < vis->bl[bl].freq_count; freq++) {
vis->bl[bl].vis[time*vis->bl[bl].freq_count + freq]
= uvgrid[coord(grid_size, theta, &vis->bl[bl], time, freq)];
flops += 2;
}
}
}
return flops;
}
uint64_t grid_simple(double complex *uvgrid, int grid_size, double theta,
struct vis_data *vis) {
uint64_t flops = 0;
int bl, time, freq;
for (bl = 0; bl < vis->bl_count; bl++) {
for (time = 0; time < vis->bl[bl].time_count; time++) {
for (freq = 0; freq < vis->bl[bl].freq_count; freq++) {
uvgrid[coord(grid_size, theta, &vis->bl[bl], time, freq)]
+= vis->bl[bl].vis[time*vis->bl[bl].freq_count + freq];
flops += 2;
}
}
}
return flops;
}
double complex degrid_conv_uv(double complex *uvgrid, int grid_size, double theta,
double u, double v,
struct sep_kernel_data *kernel,
uint64_t *flops)
{
// Calculate grid and sub-grid coordinates
int grid_offset, sub_offset_x, sub_offset_y;
frac_coord_sep_uv(grid_size, kernel->size, kernel->oversampling,
theta, u, v,
&grid_offset, &sub_offset_x, &sub_offset_y);
#ifndef __AVX2__
// Get visibility
double complex vis = 0;
int y, x;
for (y = 0; y < kernel->size; y++) {
double complex visy = 0;
for (x = 0; x < kernel->size; x++)
visy += kernel->data[sub_offset_x + x] *
uvgrid[grid_offset + y*grid_size + x];
vis += kernel->data[sub_offset_y + y] * visy;
}
*flops += 4 * (1 + kernel->size) * kernel->size;
return vis;
#else
// Get visibility
assert(kernel->size % 2 == 0);
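// Each __m256d holds two complex grid cells as (re0,im0,re1,im1); kern
// duplicates the two matching real-valued kernel taps so a single FMA
// advances two complex multiply-accumulates at once.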
__m256d vis = _mm256_setzero_pd();
int y, x;
for (y = 0; y < kernel->size; y += 1) {
__m256d sum = _mm256_setzero_pd();
for (x = 0; x < kernel->size; x += 2) {
double *pk = kernel->data + sub_offset_x + x;
__m256d kern = _mm256_setr_pd(*pk, *pk, *(pk+1), *(pk+1));
__m256d grid = _mm256_loadu_pd((double *)(uvgrid + grid_offset + y*grid_size + x));
sum = _mm256_fmadd_pd(kern, grid, sum);
}
double kern_y = kernel->data[sub_offset_y + y];
vis = _mm256_fmadd_pd(sum, _mm256_set1_pd(kern_y), vis);
}
__attribute__ ((aligned (32))) double vis_s[4];
_mm256_store_pd(vis_s, vis);
double complex vis_res = vis_s[0] + vis_s[2] + I * (vis_s[1] + vis_s[3]);
*flops += 4 * (1 + kernel->size) * kernel->size;
return vis_res;
#endif
}
uint64_t degrid_conv_bl(double complex *uvgrid, int grid_size, double theta,
double d_u, double d_v,
double min_u, double max_u, double min_v, double max_v,
struct bl_data *bl,
int time0, int time1, int freq0, int freq1,
struct sep_kernel_data *kernel)
{
uint64_t flops = 0;
int time, freq;
for (time = time0; time < time1; time++) {
for (freq = freq0; freq < freq1; freq++) {
// Bounds check
double u = uvw_lambda(bl, time, freq, 0);
if (u < min_u || u >= max_u) continue;
double v = uvw_lambda(bl, time, freq, 1);
if (v < min_v || v >= max_v) continue;
bl->vis[(time-time0)*(freq1 - freq0) + freq-freq0] =
degrid_conv_uv(uvgrid, grid_size, theta,
u-d_u, v-d_v, kernel, &flops);
}
}
return flops;
}
uint64_t degrid_conv(double complex *uvgrid, int grid_size, double theta, double d_u, double d_v,
struct vis_data *vis, struct sep_kernel_data *kernel)
{
uint64_t flops = 0;
int bl;
for (bl = 0; bl < vis->bl_count; bl++) {
flops += degrid_conv_bl(uvgrid, grid_size, theta,
d_u, d_v,
d_u-grid_size/theta/2, d_u+grid_size/theta/2,
d_v-grid_size/theta/2, d_v+grid_size/theta/2,
&vis->bl[bl], 0, vis->bl[bl].time_count, 0, vis->bl[bl].freq_count,
kernel);
}
return flops;
}
inline static uint64_t w_project(double complex *uvgrid, int grid_size, double theta,
int time, int freq,
double d_u, double d_v, double d_w,
struct bl_data *bl, struct w_kernel_data *wkern) {
// Calculate grid and sub-grid coordinates
int grid_offset, sub_offset;
frac_coord_2d(grid_size, wkern->size_x, wkern->oversampling,
theta, bl, time, freq, d_u, d_v,
&grid_offset, &sub_offset);
// Determine w-kernel to use
double w = uvw_lambda(bl, time, freq, 2) - d_w;
int w_plane = (int)floor((w - wkern->w_min) / wkern->w_step + .5);
double complex *wk = wkern->kern_by_w[w_plane].data;
// Get visibility
double complex v = bl->vis[time*bl->freq_count+freq];
// Copy kernel
int x, y;
int wkern_size = wkern->size_x;
assert(wkern->size_y == wkern_size);
for (y = 0; y < wkern_size; y++) {
for (x = 0; x < wkern_size; x++) {
uvgrid[grid_offset + y*grid_size + x]
+= v * conj(wk[sub_offset + y*wkern_size + x]);
}
}
return 8 * wkern->size_x * wkern->size_y;
}
uint64_t grid_wprojection(double complex *uvgrid, int grid_size, double theta,
struct vis_data *vis, struct w_kernel_data *wkern) {
uint64_t flops = 0;
int bl, time, freq;
for (bl = 0; bl < vis->bl_count; bl++) {
for (time = 0; time < vis->bl[bl].time_count; time++) {
for (freq = 0; freq < vis->bl[bl].freq_count; freq++) {
flops += w_project(uvgrid, grid_size, theta, time, freq,
0, 0, 0, &vis->bl[bl], wkern);
}
}
}
return flops;
}
static inline double lambda_min(struct bl_data *bl_data, double u) {
return u * (u < 0 ? bl_data->f_max : bl_data->f_min) / c;
}
static inline double lambda_max(struct bl_data *bl_data, double u) {
return u * (u < 0 ? bl_data->f_min : bl_data->f_max) / c;
}
static uint64_t w_project_bin(double complex *subgrid, int subgrid_size, double theta,
struct bl_data **bl_bin, int bl_count,
struct w_kernel_data *wkern,
double u_min, double u_max, double u_mid,
double v_min, double v_max, double v_mid,
double w_min, double w_max, double w_mid) {
int bl, time, freq;
uint64_t all_flops = 0;
for (bl = 0; bl < bl_count; bl++) {
struct bl_data *bl_data = bl_bin[bl];
// Baseline cannot possibly overlap this uvw bin?
if (lambda_max(bl_data, bl_data->u_max) < u_min ||
lambda_min(bl_data, bl_data->u_min) >= u_max ||
lambda_max(bl_data, bl_data->v_max) < v_min ||
lambda_min(bl_data, bl_data->v_min) >= v_max ||
lambda_max(bl_data, bl_data->w_max) < w_min ||
lambda_min(bl_data, bl_data->w_min) >= w_max) {
// Skip
continue;
}
// Then go through individual visibilities
uint64_t flops = 0;
for (time = 0; time < bl_data->time_count; time++) {
for (freq = 0; freq < bl_data->freq_count; freq++) {
// Fine bounds check
double u = uvw_lambda(bl_data, time, freq, 0);
double v = uvw_lambda(bl_data, time, freq, 1);
double w = uvw_lambda(bl_data, time, freq, 2);
if (u < u_min || u >= u_max ||
v < v_min || v >= v_max ||
w < w_min || w >= w_max) {
continue;
}
// w-project the last mile (TODO: special case for wp=0)
flops += w_project(subgrid, subgrid_size, theta,
time, freq,
u_mid, v_mid, w_mid,
bl_data, wkern);
}
}
#pragma omp atomic
bl_data->flops += flops;
all_flops += flops;
}
return all_flops;
}
// How is this not in the standard library somewhere?
// (stolen from Stack Overflow)
inline static double complex cipow(double complex base, int exp)
{
double complex result = 1;
if (exp < 0) return 1 / cipow(base, -exp);
if (exp == 1) return base;
while (exp)
{
if (exp & 1)
result *= base;
exp >>= 1;
base *= base;
}
return result;
}
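// e.g. cipow(b, 5): 5 = 0b101, so the loop multiplies b (bit 0) and b^4
// (bit 2) into the result, squaring base between bits.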
inline static int cipow_flops(int exp)
{
// Popcount, basically. Yes, there are really clever algorithms for
// this *and* a builtin, but we are more interested in clarity here.
if (exp < 0) return cipow_flops(-exp);
int flops = 0;
while(exp)
{
if (exp & 1) flops+=6;
exp >>= 1;
}
return flops;
}
double complex *make_wtransfer(double theta, double wincrement,
int subgrid_size, int fsample_size,
uint64_t *flops)
{
if(fsample_size < subgrid_size) {
fprintf(stderr, "Warning: Increasing Fresnel sampling to sub-grid resolution!");
fsample_size = subgrid_size;
}
// Allocate grid for Fresnel sampling / w kernel
int fsample_mem_size = sizeof(double complex) * fsample_size * fsample_size;
int subgrid_mem_size = sizeof(double complex) * subgrid_size * subgrid_size;
double complex *wtransfer = (double complex *)malloc(subgrid_mem_size);
double complex *fsample = wtransfer;
fftw_plan ff_fft_plan = 0, ff_ifft_plan = 0;
if (fsample_size > subgrid_size) {
// If we need to down-sample the Fresnel pattern to the
// subgrid allocate a separate buffer
fsample = (double complex *)malloc(fsample_mem_size);
ff_fft_plan = fftw_plan_dft_2d(fsample_size, fsample_size, fsample, fsample, -1, FFTW_MEASURE);
ff_ifft_plan = fftw_plan_dft_2d(subgrid_size, subgrid_size, wtransfer, wtransfer, +1, FFTW_MEASURE);
}
// Sample
int x, y;
for (y = 0; y < fsample_size; y++) {
for (x = 0; x < fsample_size; x++) {
double l = theta * (double)(x - fsample_size / 2) / fsample_size;
double m = theta * (double)(y - fsample_size / 2) / fsample_size;
double ph = 2 * M_PI * wincrement * (1 - sqrt(1 - l*l - m*m));
fsample[y * fsample_size + x] = cos(ph) + I * sin(ph);
}
}
*flops += 10 * fsample_size * fsample_size;
// Move zero image position to (0,0). This is going to be the
// convention for subimg, it simplifies the (frequent!) FFTs.
fft_shift(fsample, fsample_size);
// Downsample Fresnel pattern
if (fsample_size > subgrid_size) {
fftw_execute(ff_fft_plan); fftw_destroy_plan(ff_fft_plan);
*flops += (int) ceil(5 * fsample_size * fsample_size * log(fsample_size * fsample_size) / log(2) );
fft_shift(fsample, fsample_size);
for (y = 0; y < subgrid_size; y++) {
for (x = 0; x < subgrid_size; x++) {
int y2 = y - subgrid_size/2 + fsample_size/2;
int x2 = x - subgrid_size/2 + fsample_size/2;
wtransfer[y * subgrid_size + x] = fsample[y2 * fsample_size + x2] / fsample_size / fsample_size;
}
}
free(fsample);
fft_shift(wtransfer, subgrid_size);
fftw_execute(ff_ifft_plan); fftw_destroy_plan(ff_ifft_plan);
*flops += (int) ceil(5 * subgrid_size * subgrid_size * log(subgrid_size * subgrid_size) / log(2) );
}
return wtransfer;
}
uint64_t grid_wtowers(double complex *uvgrid, int grid_size,
double theta,
struct vis_data *vis, struct w_kernel_data *wkern,
int subgrid_size, int fsample_size,
int subgrid_margin, double wincrement) {
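// w-towers, in outline: baselines are binned into uv chunks; per chunk,
// visibilities are gridded one w-plane at a time onto a small subgrid,
// which is inverse-FFT'd and accumulated into an image-space sum that is
// advanced between planes by powers of the Fresnel transfer pattern; the
// sum is finally brought back to w=0, FFT'd, and added to the main grid.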
// Check conditions
if(subgrid_margin < wkern->size_x) {
fprintf(stderr, "Error: The margin is too small for this w-kernel size!\n");
exit(1);
}
assert(wkern->size_x == wkern->size_y);
if(wkern->w_min > -wincrement / 2 || wkern->w_max < wincrement / 2) {
fprintf(stderr, "Error: The w-kernel does not cover a whole w-increment!\n");
exit(1);
}
assert(subgrid_size % 2 == 0 && subgrid_margin % 2 == 0); // Not sure whether it works otherwise
// Make w transfer pattern
uint64_t flops = 0;
double complex *wtransfer = make_wtransfer(theta, wincrement, subgrid_size, fsample_size, &flops);
// Determine bounds in w
double vis_w_min = 0, vis_w_max = 0;
int bl;
for (bl = 0; bl < vis->bl_count; bl++) {
double w_min = lambda_min(&vis->bl[bl], vis->bl[bl].w_min);
double w_max = lambda_max(&vis->bl[bl], vis->bl[bl].w_max);
if (w_min < vis_w_min) { vis_w_min = w_min; }
if (w_max > vis_w_max) { vis_w_max = w_max; }
}
int wp_min = (int) floor(vis_w_min / wincrement + 0.5);
int wp_max = (int) floor(vis_w_max / wincrement + 0.5);
// Bin in uv
int chunk_size = subgrid_size - subgrid_margin;
int chunk_count = grid_size / chunk_size + 1;
int bins_size = sizeof(void *) * chunk_count * chunk_count;
struct bl_data ***bins = (struct bl_data ***)malloc(bins_size);
memset(bins, 0, bins_size);
int bins_count_size = sizeof(int) * chunk_count * chunk_count;
int *bins_count = (int *)malloc(bins_count_size);
memset(bins_count, 0, bins_count_size);
for (bl = 0; bl < vis->bl_count; bl++) {
// Determine bounds (could be more precise, future work...)
struct bl_data *bl_data = &vis->bl[bl];
double u_min = lambda_min(bl_data, bl_data->u_min);
double u_max = lambda_max(bl_data, bl_data->u_max);
double v_min = lambda_min(bl_data, bl_data->v_min);
double v_max = lambda_max(bl_data, bl_data->v_max);
// Determine first/last overlapping grid chunks
int cx0 = (floor(u_min * theta + 0.5) + grid_size/2) / chunk_size;
int cx1 = (floor(u_max * theta + 0.5) + grid_size/2) / chunk_size;
int cy0 = (floor(v_min * theta + 0.5) + grid_size/2) / chunk_size;
int cy1 = (floor(v_max * theta + 0.5) + grid_size/2) / chunk_size;
int cy, cx;
for (cy = cy0; cy <= cy1; cy++) {
for (cx = cx0; cx <= cx1; cx++) {
// Lazy dynamically sized vector
int bcount = ++bins_count[cy*chunk_count + cx];
bins[cy*chunk_count + cx] =
(struct bl_data **)realloc(bins[cy*chunk_count + cx], sizeof(void *) * bcount);
bins[cy*chunk_count + cx][bcount-1] = bl_data;
}
}
}
// Initialise FLOP counts. We will do a lot of subgrid FFTs, so cache
// the approximate cost of one.
int subgrid_cells = subgrid_size * subgrid_size;
uint64_t subgrid_fft_flops = (int) ceil(5 * subgrid_cells * log(subgrid_cells) / log(2) );
#pragma omp parallel reduction(+:flops)
{
// Make sub-grids in grid and image space, and FFT plans
int subgrid_mem_size = sizeof(double complex) * subgrid_size * subgrid_size;
double complex *subgrid = (double complex *)malloc(subgrid_mem_size);
double complex *subimg = (double complex *)malloc(subgrid_mem_size);
fftw_plan fft_plan, ifft_plan;
#pragma omp critical
{
fft_plan = fftw_plan_dft_2d(subgrid_size, subgrid_size, subimg, subimg, -1, FFTW_MEASURE);
ifft_plan = fftw_plan_dft_2d(subgrid_size, subgrid_size, subgrid, subgrid, +1, FFTW_MEASURE);
}
// Grid chunks
int cc;
#pragma omp for schedule(dynamic)
for (cc = 0; cc < chunk_count * chunk_count; cc++) {
int cx = cc % chunk_count, cy = cc / chunk_count;
// Get our baselines bin
struct bl_data **bl_bin = bins[cy*chunk_count + cx];
int bl_count = bins_count[cy*chunk_count + cx];
if (bl_count == 0) { continue; }
//printf("%d %d/%d\n", omp_get_thread_num(), cx, cy);
// Determine tower base
int x_min = chunk_size*cx - grid_size/2;
int y_min = chunk_size*cy - grid_size/2;
double u_min = ((double)x_min - 0.5) / theta;
double v_min = ((double)y_min - 0.5) / theta;
double u_max = u_min + chunk_size / theta;
double v_max = v_min + chunk_size / theta;
// Midpoint in uvw. Important to note that for even-sized
// FFTs (likely what we are using!) this is slightly
// off-centre, so u_mid != (u_min + u_max) / 2.
double u_mid = (double)(x_min + chunk_size / 2) / theta;
double v_mid = (double)(y_min + chunk_size / 2) / theta;
// Clean subgrid
memset(subgrid, 0, subgrid_mem_size);
memset(subimg, 0, subgrid_mem_size);
// Go through w-planes
int have_vis = 0;
int last_wp = wp_min;
int wp;
for (wp = wp_min; wp <= wp_max; wp++) {
double w_mid = (double)wp * wincrement;
double w_min = ((double)wp - 0.5) * wincrement;
double w_max = ((double)wp + 0.5) * wincrement;
// Now grid all baselines for this uvw bin
uint64_t bin_flops = w_project_bin(subgrid, subgrid_size, theta, bl_bin, bl_count, wkern,
u_min, u_max, u_mid,
v_min, v_max, v_mid,
w_min, w_max, w_mid);
// Skip the rest if we found no visibilities
if (bin_flops == 0) { continue; }
flops += bin_flops;
have_vis = 1;
// IFFT the sub-grid in-place, add to image sum
fftw_execute(ifft_plan);
flops += subgrid_fft_flops;
// Bring image sum to our w-plane and add new data,
// clean subgrid for next w-plane.
int x, y;
for (y = 0; y < subgrid_size; y++) {
for (x = 0; x < subgrid_size; x++) {
double complex wtrans = cipow(wtransfer[y*subgrid_size + x], wp-last_wp);
subimg[y*subgrid_size + x] =
wtrans * subimg[y*subgrid_size + x] + subgrid[y*subgrid_size + x];
subgrid[y*subgrid_size + x] = 0;
}
}
flops += subgrid_cells * (8 + cipow_flops(wp-last_wp));
last_wp = wp;
}
// No visibilities? Skip
if (!have_vis) { continue; }
// Transfer to w=0 plane
if (last_wp != 0) {
int x, y;
for (y = 0; y < subgrid_size; y++) {
for (x = 0; x < subgrid_size; x++) {
subimg[y*subgrid_size + x] /= cipow(wtransfer[y*subgrid_size + x], last_wp);
}
}
flops += subgrid_cells * (8 + cipow_flops(last_wp));
}
// FFT
fftw_execute(fft_plan);
flops += subgrid_fft_flops;
// Add to main grid, ignoring margins, which might be over
// bounds (should not actually happen, but let's be safe)
int x0 = x_min - subgrid_margin/2, x1 = x0 + subgrid_size;
int y0 = y_min - subgrid_margin/2, y1 = y0 + subgrid_size;
if (x0 < -grid_size/2) { x0 = -grid_size/2; }
if (y0 < -grid_size/2) { y0 = -grid_size/2; }
if (x1 > grid_size/2) { x1 = grid_size/2; }
if (y1 > grid_size/2) { y1 = grid_size/2; }
double complex *uvgrid_mid = uvgrid + (grid_size+1)*grid_size/2;
int x, y;
for (y = y0; y < y1; y++) {
for (x = x0; x < x1; x++) {
uvgrid_mid[x + y*grid_size] += subimg[(x-x_min+subgrid_margin/2) +
(y-y_min+subgrid_margin/2)*subgrid_size] / subgrid_size / subgrid_size;
}
}
flops += 2 * (y1 - y0) * (x1 - x0);
}
// Clean up
fftw_destroy_plan(fft_plan);
fftw_destroy_plan(ifft_plan);
free(subgrid);
free(subimg);
}
// Check that all visibilities were actually gridded
for (bl = 0; bl < vis->bl_count; bl++) {
if (vis->bl[bl].flops !=
8 * wkern->size_x * wkern->size_x * vis->bl[bl].time_count * vis->bl[bl].freq_count) {
printf("!!! bl %d-%d: %lu flops, %d expected !!!\n",
vis->bl[bl].antenna1, vis->bl[bl].antenna2,
vis->bl[bl].flops,
8 * wkern->size_x * wkern->size_x * vis->bl[bl].time_count * vis->bl[bl].freq_count);
}
}
int cx, cy;
for (cy = 0; cy < grid_size / chunk_size + 1; cy++) {
for (cx = 0; cx < grid_size / chunk_size + 1; cx++) {
free(bins[cy*chunk_count + cx]);
}
}
free(bins);
free(bins_count);
return flops;
}
void convolve_aw_kernels(struct bl_data *bl,
struct w_kernel_data *wkern,
struct a_kernel_data *akern) {
assert(wkern->size_x == akern->size_x);
assert(wkern->size_y == akern->size_y);
int size_x = wkern->size_x, size_y = wkern->size_y;
int ov = wkern->oversampling;
// We assume that every time channel has their own w-kernel
const int awkern_count = bl->time_count * akern->freq_count;
const int awkern_size = size_x * size_y * ov * ov;
bl->awkern = (double complex *)malloc(awkern_count * awkern_size * sizeof(double complex));
int time, freq;
for (time = 0; time < bl->time_count; time++) {
for (freq = 0; freq < akern->freq_count; freq++) {
double t = bl->time[time];
int atime = (int)floor((t - akern->t_min) / akern->t_step + .5);
int a1i = bl->antenna1 * akern->time_count * akern->freq_count + atime * akern->freq_count + freq;
int a2i = bl->antenna2 * akern->time_count * akern->freq_count + atime * akern->freq_count + freq;
struct a_kernel *a1k = &akern->kern_by_atf[a1i];
struct a_kernel *a2k = &akern->kern_by_atf[a2i];
double w = uvw_m_to_l(bl->uvw_m[time*3+2], a1k->freq);
int w_plane = (int)floor((w - wkern->w_min) / wkern->w_step + .5);
struct w_kernel *wk = &wkern->kern_by_w[w_plane];
// Here is where we normally would convolve the kernel -
// but it doesn't matter for this test, so we just copy
// the w-kernel.
memcpy(&bl->awkern[(time * akern->freq_count + freq) * awkern_size],
wk->data,
awkern_size * sizeof(double complex));
}
}
}
uint64_t grid_awprojection(double complex *uvgrid, int grid_size, double theta,
struct vis_data *vis,
struct w_kernel_data *wkern,
struct a_kernel_data *akern,
int bl_min, int bl_max) {
// Note that we require awkern to be set on all baselines
// processed here!
uint64_t flops = 0;
int bl, time, freq;
for (bl = bl_min; bl < bl_max; bl++) {
const int awkern_size = akern->size_x * akern->size_y *
wkern->oversampling * wkern->oversampling;
const struct bl_data *pbl = &vis->bl[bl];
for (time = 0; time < pbl->time_count; time++) {
for (freq = 0; freq < pbl->freq_count; freq++) {
// Calculate grid and sub-grid coordinates
int grid_offset, sub_offset;
frac_coord_2d(grid_size, wkern->size_x, wkern->oversampling,
theta, &vis->bl[bl], time, freq, 0, 0,
&grid_offset, &sub_offset);
// Determine kernel frequency, get kernel
int afreq = (int)floor((pbl->freq[freq] - akern->f_min)
/ akern->f_step + .5);
double complex *awk = &pbl->awkern[
(time * akern->freq_count + afreq) * awkern_size];
// Get visibility
double complex v = vis->bl[bl].vis[time*vis->bl[bl].freq_count+freq];
int x, y;
for (y = 0; y < wkern->size_y; y++) {
for (x = 0; x < wkern->size_x; x++) {
uvgrid[grid_offset + y*grid_size + x]
+= v * conj(awk[sub_offset + y*wkern->size_x + x]);
}
}
flops += 8 * wkern->size_x * wkern->size_y;
}
}
}
return flops;
}
void make_hermitian(double complex *uvgrid, int grid_size) {
// Determine start index. For even-sized grids, the zero frequency
// is at grid_size/2, so things are off by one.
complex double *p0;
if (grid_size % 2 == 0) {
p0 = uvgrid + grid_size + 1;
} else {
p0 = uvgrid;
}
complex double *p1 = uvgrid + grid_size * grid_size - 1;
// Now simply add cells from the other side of the grid
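// Cell (y,x) pairs with (grid_size-y, grid_size-x); e.g. for an 8x8 grid
// p0/p1 start at (1,1)/(7,7) and meet at the zero frequency (4,4).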
while(p0 < p1) {
double complex g0 = *p0;
*p0++ += conj(*p1);
*p1-- += conj(g0);
}
// Should end up exactly on the zero frequency
assert( p0 == p1 && p0 == uvgrid + (grid_size+1) * (grid_size/2) );
*p0 += conj(*p0);
}
void fft_shift(double complex *uvgrid, int grid_size) {
// Shift the FFT
assert(grid_size % 2 == 0);
int x, y;
for (y = 0; y < grid_size; y++) {
for (x = 0; x < grid_size/2; x++) {
int ix0 = y * grid_size + x;
int ix1 = (ix0 + (grid_size+1) * (grid_size/2)) % (grid_size*grid_size);
double complex temp = uvgrid[ix0];
uvgrid[ix0] = uvgrid[ix1];
uvgrid[ix1] = temp;
}
}
}
|
GB_unop__isfinite_bool_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isfinite_bool_fp64)
// op(A') function: GB (_unop_tran__isfinite_bool_fp64)
// C type: bool
// A type: double
// cast: double cij = (aij)
// unaryop: cij = isfinite (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = isfinite (x) ;
// casting
#define GB_CAST(z, aij) \
double z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (aij) ; \
Cx [pC] = isfinite (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__isfinite_bool_fp64)
(
bool *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = (aij) ;
Cx [p] = isfinite (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = (aij) ;
Cx [p] = isfinite (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__isfinite_bool_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__abs_int8_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int8_uint16
// op(A') function: GB_tran__abs_int8_uint16
// C type: int8_t
// A type: uint16_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, aij) \
int8_t z = (int8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_int8_uint16
(
int8_t *Cx, // Cx and Ax may be aliased
uint16_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
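//------------------------------------------------------------------------------
// expansion sketch (illustrative only): for this kernel, GB_CAST_OP (p,p)
// expands to the three statements below. Note that the int8_t cast happens
// before GB_IABS, so uint16_t values outside the int8_t range truncate
// first; GB_IABS (defined elsewhere in GraphBLAS) is the integer absolute
// value.
//------------------------------------------------------------------------------
#if 0
static void example_cast_op (int8_t *Cx, const uint16_t *Ax, int64_t p)
{
    uint16_t aij = Ax [p] ;             // GB_GETA (aij, Ax, p)
    int8_t z = (int8_t) aij ;           // GB_CASTING (z, aij)
    Cx [p] = GB_IABS (z) ;              // GB_OP (GB_CX (p), z)
}
#endif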
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_int8_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cp-tree.h | /* Definitions for C++ parsing and type checking.
Copyright (C) 1987, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com)
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
#ifndef GCC_CP_TREE_H
#define GCC_CP_TREE_H
#include "ggc.h"
#include "function.h"
#include "hashtab.h"
#include "splay-tree.h"
#include "vec.h"
#include "varray.h"
#include "c-common.h"
#include "name-lookup.h"
struct diagnostic_context;
/* Usage of TREE_LANG_FLAG_?:
0: IDENTIFIER_MARKED (IDENTIFIER_NODEs)
NEW_EXPR_USE_GLOBAL (in NEW_EXPR).
DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR).
COMPOUND_EXPR_OVERLOADED (in COMPOUND_EXPR).
TREE_INDIRECT_USING (in NAMESPACE_DECL).
CLEANUP_P (in TRY_BLOCK)
AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR)
PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF)
PAREN_STRING_LITERAL (in STRING_CST)
DECL_PRETTY_FUNCTION_P (in VAR_DECL)
KOENIG_LOOKUP_P (in CALL_EXPR)
STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST).
EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT)
STMT_EXPR_NO_SCOPE (in STMT_EXPR)
BIND_EXPR_TRY_BLOCK (in BIND_EXPR)
TYPENAME_IS_ENUM_P (in TYPENAME_TYPE)
REFERENCE_REF_P (in INDIRECT_EXPR)
QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF)
OMP_ATOMIC_DEPENDENT_P (in OMP_ATOMIC)
OMP_FOR_GIMPLIFYING_P (in OMP_FOR)
BASELINK_QUALIFIED_P (in BASELINK)
TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR)
1: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE)
TI_PENDING_TEMPLATE_FLAG.
TEMPLATE_PARMS_FOR_INLINE.
DELETE_EXPR_USE_VEC (in DELETE_EXPR).
(TREE_CALLS_NEW) (in _EXPR or _REF) (commented-out).
ICS_ELLIPSIS_FLAG (in _CONV)
DECL_INITIALIZED_P (in VAR_DECL)
TYPENAME_IS_CLASS_P (in TYPENAME_TYPE)
STMT_IS_FULL_EXPR_P (in _STMT)
2: IDENTIFIER_OPNAME_P (in IDENTIFIER_NODE)
ICS_THIS_FLAG (in _CONV)
DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL)
STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST)
3: (TREE_REFERENCE_EXPR) (in NON_LVALUE_EXPR) (commented-out).
ICS_BAD_FLAG (in _CONV)
FN_TRY_BLOCK_P (in TRY_BLOCK)
IDENTIFIER_CTOR_OR_DTOR_P (in IDENTIFIER_NODE)
BIND_EXPR_BODY_BLOCK (in BIND_EXPR)
DECL_NON_TRIVIALLY_INITIALIZED_P (in VAR_DECL)
4: TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, CONSTRUCTOR,
or FIELD_DECL).
IDENTIFIER_TYPENAME_P (in IDENTIFIER_NODE)
DECL_TINFO_P (in VAR_DECL)
5: C_IS_RESERVED_WORD (in IDENTIFIER_NODE)
DECL_VTABLE_OR_VTT_P (in VAR_DECL)
6: IDENTIFIER_REPO_CHOSEN (in IDENTIFIER_NODE)
DECL_CONSTRUCTION_VTABLE_P (in VAR_DECL)
TYPE_MARKED_P (in _TYPE)
Usage of TYPE_LANG_FLAG_?:
0: TYPE_DEPENDENT_P
1: TYPE_HAS_CONSTRUCTOR.
2: Unused
3: TYPE_FOR_JAVA.
4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR
5: IS_AGGR_TYPE.
6: TYPE_DEPENDENT_P_VALID
Usage of DECL_LANG_FLAG_?:
0: DECL_ERROR_REPORTED (in VAR_DECL).
DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL)
DECL_LOCAL_FUNCTION_P (in FUNCTION_DECL)
DECL_MUTABLE_P (in FIELD_DECL)
DECL_DEPENDENT_P (in USING_DECL)
1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL).
DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL)
DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL)
2: DECL_THIS_EXTERN (in VAR_DECL or FUNCTION_DECL).
DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL)
3: DECL_IN_AGGR_P.
4: DECL_C_BIT_FIELD (in a FIELD_DECL)
DECL_ANON_UNION_VAR_P (in a VAR_DECL)
DECL_SELF_REFERENCE_P (in a TYPE_DECL)
DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL)
5: DECL_INTERFACE_KNOWN.
6: DECL_THIS_STATIC (in VAR_DECL or FUNCTION_DECL).
DECL_FIELD_IS_BASE (in FIELD_DECL)
7: DECL_DEAD_FOR_LOCAL (in VAR_DECL).
DECL_THUNK_P (in a member FUNCTION_DECL)
Usage of language-independent fields in a language-dependent manner:
TYPE_ALIAS_SET
This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so
forth as a substitute for the mark bits provided in `lang_type'.
At present, only the six low-order bits are used.
TYPE_LANG_SLOT_1
For an ENUMERAL_TYPE, this is ENUM_TEMPLATE_INFO.
For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS
BINFO_VIRTUALS
For a binfo, this is a TREE_LIST. There is an entry for each
virtual function declared either in BINFO or its direct and
indirect primary bases.
The BV_DELTA of each node gives the amount by which to adjust the
`this' pointer when calling the function. If the method is an
overridden version of a base class method, then it is assumed
that, prior to adjustment, the this pointer points to an object
of the base class.
The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable
index of the vcall offset for this entry.
The BV_FN is the declaration for the virtual function itself.
BINFO_VTABLE
This is an expression with POINTER_TYPE that gives the value
to which the vptr should be initialized. Use get_vtbl_decl_for_binfo
to extract the VAR_DECL for the complete vtable.
DECL_ARGUMENTS
For a VAR_DECL this is DECL_ANON_UNION_ELEMS.
DECL_VINDEX
This field is NULL for a non-virtual function. For a virtual
function, it is eventually set to an INTEGER_CST indicating the
index in the vtable at which this function can be found. When
a virtual function is declared, but before it is known what
function is overridden, this field is the error_mark_node.
Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is
the virtual function this one overrides, and whose TREE_CHAIN is
the old DECL_VINDEX. */
/* Language-specific tree checkers. */
#define VAR_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL)
#define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \
TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL)
#define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
#define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \
TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM)
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define NON_THUNK_FUNCTION_CHECK(NODE) __extension__ \
({ const tree __t = (NODE); \
if (TREE_CODE (__t) != FUNCTION_DECL && \
TREE_CODE (__t) != TEMPLATE_DECL && __t->decl_common.lang_specific \
&& __t->decl_common.lang_specific->decl_flags.thunk_p) \
tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \
__t; })
#define THUNK_FUNCTION_CHECK(NODE) __extension__ \
({ const tree __t = (NODE); \
if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \
|| !__t->decl_common.lang_specific->decl_flags.thunk_p) \
tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \
__t; })
#else
#define NON_THUNK_FUNCTION_CHECK(NODE) (NODE)
#define THUNK_FUNCTION_CHECK(NODE) (NODE)
#endif
/* Language-dependent contents of an identifier. */
struct lang_identifier GTY(())
{
struct c_common_identifier c_common;
cxx_binding *namespace_bindings;
cxx_binding *bindings;
tree class_template_info;
tree label_value;
};
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword,
and C_RID_YYCODE is the token number wanted by Yacc. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_5 (ID)
#define LANG_IDENTIFIER_CAST(NODE) \
((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE))
struct template_parm_index_s GTY(())
{
struct tree_common common;
HOST_WIDE_INT index;
HOST_WIDE_INT level;
HOST_WIDE_INT orig_level;
tree decl;
};
typedef struct template_parm_index_s template_parm_index;
struct tinst_level_s GTY(())
{
struct tree_common common;
tree decl;
location_t locus;
int in_system_header_p;
};
typedef struct tinst_level_s * tinst_level_t;
struct ptrmem_cst GTY(())
{
struct tree_common common;
/* This isn't used, but the middle-end expects all constants to have
this field. */
rtx rtl;
tree member;
};
typedef struct ptrmem_cst * ptrmem_cst_t;
#define IDENTIFIER_GLOBAL_VALUE(NODE) \
namespace_binding ((NODE), global_namespace)
#define SET_IDENTIFIER_GLOBAL_VALUE(NODE, VAL) \
set_namespace_binding ((NODE), global_namespace, (VAL))
#define IDENTIFIER_NAMESPACE_VALUE(NODE) \
namespace_binding ((NODE), current_namespace)
#define SET_IDENTIFIER_NAMESPACE_VALUE(NODE, VAL) \
set_namespace_binding ((NODE), current_namespace, (VAL))
#define CLEANUP_P(NODE) TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE))
#define BIND_EXPR_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE))
/* Used to mark the block around the member initializers and cleanups. */
#define BIND_EXPR_BODY_BLOCK(NODE) \
TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE))
#define FUNCTION_NEEDS_BODY_BLOCK(NODE) \
(DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE))
#define STATEMENT_LIST_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE))
#define STATEMENT_LIST_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE))
/* Nonzero if this statement should be considered a full-expression,
i.e., if temporaries created during this statement should have
their destructors run at the end of this statement. */
#define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE))
/* Marks the result of a statement expression. */
#define EXPR_STMT_STMT_EXPR_RESULT(NODE) \
TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE))
/* Nonzero if this statement-expression does not have an associated scope. */
#define STMT_EXPR_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE))
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual
sense of `same'. */
#define same_type_p(TYPE1, TYPE2) \
comptypes ((TYPE1), (TYPE2), COMPARE_STRICT)
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, ignoring
top-level qualifiers. */
#define same_type_ignoring_top_level_qualifiers_p(TYPE1, TYPE2) \
same_type_p (TYPE_MAIN_VARIANT (TYPE1), TYPE_MAIN_VARIANT (TYPE2))
/* Nonzero if we are presently building a statement tree, rather
than expanding each statement as we encounter it. */
#define building_stmt_tree() (cur_stmt_list != NULL_TREE)
/* Returns nonzero iff NODE is a declaration for the global function
`main'. */
#define DECL_MAIN_P(NODE) \
(DECL_EXTERN_C_FUNCTION_P (NODE) \
&& DECL_NAME (NODE) != NULL_TREE \
&& MAIN_NAME_P (DECL_NAME (NODE)))
/* The overloaded FUNCTION_DECL. */
#define OVL_FUNCTION(NODE) \
(((struct tree_overload*)OVERLOAD_CHECK (NODE))->function)
#define OVL_CHAIN(NODE) TREE_CHAIN (NODE)
/* Polymorphic access to FUNCTION and CHAIN. */
#define OVL_CURRENT(NODE) \
((TREE_CODE (NODE) == OVERLOAD) ? OVL_FUNCTION (NODE) : (NODE))
#define OVL_NEXT(NODE) \
((TREE_CODE (NODE) == OVERLOAD) ? TREE_CHAIN (NODE) : NULL_TREE)
/* If set, this was imported in a using declaration.
This is not to be confused with the node being used somewhere,
which is not important for this node. */
#define OVL_USED(NODE) TREE_USED (NODE)
struct tree_overload GTY(())
{
struct tree_common common;
tree function;
};
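/* Usage sketch (illustrative only): the canonical way to walk an
   overload set with the polymorphic accessors above. FNS may be a
   lone FUNCTION_DECL or an OVERLOAD chain; OVL_CURRENT and OVL_NEXT
   handle both uniformly, since OVL_NEXT yields NULL_TREE for a
   non-OVERLOAD node. */
#if 0
static void
example_walk_overloads (tree fns)
{
  tree ovl;
  for (ovl = fns; ovl; ovl = OVL_NEXT (ovl))
    {
      tree fn = OVL_CURRENT (ovl);
      /* ... inspect FN, a FUNCTION_DECL or TEMPLATE_DECL ... */
    }
}
#endif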
/* Returns true iff NODE is a BASELINK. */
#define BASELINK_P(NODE) \
(TREE_CODE (NODE) == BASELINK)
/* The BINFO indicating the base from which the BASELINK_FUNCTIONS came. */
#define BASELINK_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo)
/* The functions referred to by the BASELINK; either a FUNCTION_DECL,
a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR. */
#define BASELINK_FUNCTIONS(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->functions)
/* The BINFO in which the search for the functions indicated by this baselink
began. This base is used to determine the accessibility of functions
selected by overload resolution. */
#define BASELINK_ACCESS_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo)
/* For a type-conversion operator, the BASELINK_OPTYPE indicates the type
to which the conversion should occur. This value is important if
the BASELINK_FUNCTIONS include a template conversion operator --
the BASELINK_OPTYPE can be used to determine what type the user
requested. */
#define BASELINK_OPTYPE(NODE) \
(TREE_CHAIN (BASELINK_CHECK (NODE)))
/* Non-zero if this baselink was from a qualified lookup. */
#define BASELINK_QUALIFIED_P(NODE) \
TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE))
struct tree_baselink GTY(())
{
struct tree_common common;
tree binfo;
tree functions;
tree access_binfo;
};
/* The different kinds of ids that we encounter. */
typedef enum cp_id_kind
{
/* Not an id at all. */
CP_ID_KIND_NONE,
/* An unqualified-id that is not a template-id. */
CP_ID_KIND_UNQUALIFIED,
/* An unqualified-id that is a dependent name. */
CP_ID_KIND_UNQUALIFIED_DEPENDENT,
/* An unqualified template-id. */
CP_ID_KIND_TEMPLATE_ID,
/* A qualified-id. */
CP_ID_KIND_QUALIFIED
} cp_id_kind;
/* Macros for access to language-specific slots in an identifier. */
#define IDENTIFIER_NAMESPACE_BINDINGS(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->namespace_bindings)
#define IDENTIFIER_TEMPLATE(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->class_template_info)
/* The IDENTIFIER_BINDING is the innermost cxx_binding for the
identifier. Its PREVIOUS is the next outermost binding. Each
VALUE field is a DECL for the associated declaration. Thus,
name lookup consists simply of pulling off the node at the front
of the list (modulo oddities for looking up the names of types,
and such.) You can use SCOPE field to determine the scope
that bound the name. */
#define IDENTIFIER_BINDING(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->bindings)
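/* Usage sketch (illustrative only): fetching the innermost binding for
   an identifier, per the lookup scheme described above. The VALUE and
   PREVIOUS fields belong to cxx_binding (see name-lookup.h). */
#if 0
static tree
example_innermost_value (tree id)
{
  cxx_binding *b = IDENTIFIER_BINDING (id);
  return b ? b->value : NULL_TREE;
}
#endif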
/* TREE_TYPE indicates the current type only at local and class
scope. At namespace scope, the presence of a type in any namespace
is indicated with global_type_node, and the real type behind it must
be found through lookup. */
#define IDENTIFIER_TYPE_VALUE(NODE) identifier_type_value (NODE)
#define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE)
#define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE))
#define IDENTIFIER_HAS_TYPE_VALUE(NODE) (IDENTIFIER_TYPE_VALUE (NODE) ? 1 : 0)
#define IDENTIFIER_LABEL_VALUE(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->label_value)
#define SET_IDENTIFIER_LABEL_VALUE(NODE, VALUE) \
IDENTIFIER_LABEL_VALUE (NODE) = (VALUE)
/* Nonzero if this identifier is used as a virtual function name somewhere
(optimizes searches). */
#define IDENTIFIER_VIRTUAL_P(NODE) TREE_LANG_FLAG_1 (NODE)
/* Nonzero if this identifier is the prefix for a mangled C++ operator
name. */
#define IDENTIFIER_OPNAME_P(NODE) TREE_LANG_FLAG_2 (NODE)
/* Nonzero if this identifier is the name of a type-conversion
operator. */
#define IDENTIFIER_TYPENAME_P(NODE) \
TREE_LANG_FLAG_4 (NODE)
/* Nonzero if this identifier is the name of a constructor or
destructor. */
#define IDENTIFIER_CTOR_OR_DTOR_P(NODE) \
TREE_LANG_FLAG_3 (NODE)
/* True iff NAME is the DECL_ASSEMBLER_NAME for an entity with vague
linkage which the prelinker has assigned to this translation
unit. */
#define IDENTIFIER_REPO_CHOSEN(NAME) \
(TREE_LANG_FLAG_6 (NAME))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) \
(LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly)
/* The tokens stored in the default argument. */
#define DEFARG_TOKENS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->tokens)
#define DEFARG_INSTANTIATIONS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->instantiations)
struct tree_default_arg GTY (())
{
struct tree_common common;
struct cp_token_cache *tokens;
VEC(tree,gc) *instantiations;
};
enum cp_tree_node_structure_enum {
TS_CP_GENERIC,
TS_CP_IDENTIFIER,
TS_CP_TPI,
TS_CP_TINST_LEVEL,
TS_CP_PTRMEM,
TS_CP_BINDING,
TS_CP_OVERLOAD,
TS_CP_BASELINK,
TS_CP_WRAPPER,
TS_CP_DEFAULT_ARG,
LAST_TS_CP_ENUM
};
/* The resulting tree type. */
union lang_tree_node GTY((desc ("cp_tree_node_structure (&%h)"),
chain_next ("(union lang_tree_node *)TREE_CHAIN (&%h.generic)")))
{
union tree_node GTY ((tag ("TS_CP_GENERIC"),
desc ("tree_node_structure (&%h)"))) generic;
struct template_parm_index_s GTY ((tag ("TS_CP_TPI"))) tpi;
struct tinst_level_s GTY ((tag ("TS_CP_TINST_LEVEL"))) tinst_level;
struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem;
struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload;
struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink;
struct tree_default_arg GTY ((tag ("TS_CP_DEFAULT_ARG"))) default_arg;
struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier;
};
enum cp_tree_index
{
CPTI_JAVA_BYTE_TYPE,
CPTI_JAVA_SHORT_TYPE,
CPTI_JAVA_INT_TYPE,
CPTI_JAVA_LONG_TYPE,
CPTI_JAVA_FLOAT_TYPE,
CPTI_JAVA_DOUBLE_TYPE,
CPTI_JAVA_CHAR_TYPE,
CPTI_JAVA_BOOLEAN_TYPE,
CPTI_WCHAR_DECL,
CPTI_VTABLE_ENTRY_TYPE,
CPTI_DELTA_TYPE,
CPTI_VTABLE_INDEX_TYPE,
CPTI_CLEANUP_TYPE,
CPTI_VTT_PARM_TYPE,
CPTI_CLASS_TYPE,
CPTI_UNKNOWN_TYPE,
CPTI_VTBL_TYPE,
CPTI_VTBL_PTR_TYPE,
CPTI_STD,
CPTI_ABI,
CPTI_CONST_TYPE_INFO_TYPE,
CPTI_TYPE_INFO_PTR_TYPE,
CPTI_ABORT_FNDECL,
CPTI_GLOBAL_DELETE_FNDECL,
CPTI_AGGR_TAG,
CPTI_CTOR_IDENTIFIER,
CPTI_COMPLETE_CTOR_IDENTIFIER,
CPTI_BASE_CTOR_IDENTIFIER,
CPTI_DTOR_IDENTIFIER,
CPTI_COMPLETE_DTOR_IDENTIFIER,
CPTI_BASE_DTOR_IDENTIFIER,
CPTI_DELETING_DTOR_IDENTIFIER,
CPTI_DELTA_IDENTIFIER,
CPTI_IN_CHARGE_IDENTIFIER,
CPTI_VTT_PARM_IDENTIFIER,
CPTI_NELTS_IDENTIFIER,
CPTI_THIS_IDENTIFIER,
CPTI_PFN_IDENTIFIER,
CPTI_VPTR_IDENTIFIER,
CPTI_STD_IDENTIFIER,
CPTI_LANG_NAME_C,
CPTI_LANG_NAME_CPLUSPLUS,
CPTI_LANG_NAME_JAVA,
CPTI_EMPTY_EXCEPT_SPEC,
CPTI_JCLASS,
CPTI_TERMINATE,
CPTI_CALL_UNEXPECTED,
CPTI_ATEXIT,
CPTI_DSO_HANDLE,
CPTI_DCAST,
CPTI_KEYED_CLASSES,
CPTI_MAX
};
extern GTY(()) tree cp_global_trees[CPTI_MAX];
#define java_byte_type_node cp_global_trees[CPTI_JAVA_BYTE_TYPE]
#define java_short_type_node cp_global_trees[CPTI_JAVA_SHORT_TYPE]
#define java_int_type_node cp_global_trees[CPTI_JAVA_INT_TYPE]
#define java_long_type_node cp_global_trees[CPTI_JAVA_LONG_TYPE]
#define java_float_type_node cp_global_trees[CPTI_JAVA_FLOAT_TYPE]
#define java_double_type_node cp_global_trees[CPTI_JAVA_DOUBLE_TYPE]
#define java_char_type_node cp_global_trees[CPTI_JAVA_CHAR_TYPE]
#define java_boolean_type_node cp_global_trees[CPTI_JAVA_BOOLEAN_TYPE]
#define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL]
#define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE]
/* The type used to represent an offset by which to adjust the `this'
pointer in pointer-to-member types. */
#define delta_type_node cp_global_trees[CPTI_DELTA_TYPE]
/* The type used to represent an index into the vtable. */
#define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE]
#define class_type_node cp_global_trees[CPTI_CLASS_TYPE]
#define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE]
#define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE]
#define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE]
#define std_node cp_global_trees[CPTI_STD]
#define abi_node cp_global_trees[CPTI_ABI]
#define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE]
#define type_info_ptr_type cp_global_trees[CPTI_TYPE_INFO_PTR_TYPE]
#define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL]
#define global_delete_fndecl cp_global_trees[CPTI_GLOBAL_DELETE_FNDECL]
#define current_aggr cp_global_trees[CPTI_AGGR_TAG]
/* We cache these tree nodes so as to call get_identifier less
frequently. */
/* The name of a constructor that takes an in-charge parameter to
decide whether or not to construct virtual base classes. */
#define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER]
/* The name of a constructor that constructs virtual base classes. */
#define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER]
/* The name of a constructor that does not construct virtual base classes. */
#define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER]
/* The name of a destructor that takes an in-charge parameter to
decide whether or not to destroy virtual base classes and whether
or not to delete the object. */
#define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes. */
#define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER]
/* The name of a destructor that does not destroy virtual base
classes. */
#define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes, and
then deletes the entire object. */
#define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER]
#define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER]
#define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER]
/* The name of the parameter that contains a pointer to the VTT to use
for this subobject constructor or destructor. */
#define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER]
#define nelts_identifier cp_global_trees[CPTI_NELTS_IDENTIFIER]
#define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER]
#define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER]
#define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER]
/* The name of the std namespace. */
#define std_identifier cp_global_trees[CPTI_STD_IDENTIFIER]
#define lang_name_c cp_global_trees[CPTI_LANG_NAME_C]
#define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS]
#define lang_name_java cp_global_trees[CPTI_LANG_NAME_JAVA]
/* Exception specifier used for throw(). */
#define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC]
/* If non-NULL, a POINTER_TYPE equivalent to (java::lang::Class*). */
#define jclass_node cp_global_trees[CPTI_JCLASS]
/* The declaration for `std::terminate'. */
#define terminate_node cp_global_trees[CPTI_TERMINATE]
/* The declaration for "__cxa_call_unexpected". */
#define call_unexpected_node cp_global_trees[CPTI_CALL_UNEXPECTED]
/* A pointer to `std::atexit'. */
#define atexit_node cp_global_trees[CPTI_ATEXIT]
/* A pointer to `__dso_handle'. */
#define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE]
/* The declaration of the dynamic_cast runtime. */
#define dynamic_cast_node cp_global_trees[CPTI_DCAST]
/* The type of a destructor. */
#define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE]
/* The type of the vtt parameter passed to subobject constructors and
destructors. */
#define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE]
/* A TREE_LIST of the dynamic classes whose vtables may have to be
emitted in this translation unit. */
#define keyed_classes cp_global_trees[CPTI_KEYED_CLASSES]
/* Node to indicate default access. This must be distinct from the
access nodes in tree.h. */
#define access_default_node null_node
/* Global state. */
struct saved_scope GTY(())
{
VEC(cxx_saved_binding,gc) *old_bindings;
tree old_namespace;
tree decl_ns_list;
tree class_name;
tree class_type;
tree access_specifier;
tree function_decl;
VEC(tree,gc) *lang_base;
tree lang_name;
tree template_parms;
struct cp_binding_level *x_previous_class_level;
tree x_saved_tree;
HOST_WIDE_INT x_processing_template_decl;
int x_processing_specialization;
bool x_processing_explicit_instantiation;
int need_pop_function_context;
bool skip_evaluation;
struct stmt_tree_s x_stmt_tree;
struct cp_binding_level *class_bindings;
struct cp_binding_level *bindings;
struct saved_scope *prev;
};
/* The current open namespace. */
#define current_namespace scope_chain->old_namespace
/* The stack for namespaces of current declarations. */
#define decl_namespace_list scope_chain->decl_ns_list
/* IDENTIFIER_NODE: name of current class */
#define current_class_name scope_chain->class_name
/* _TYPE: the type of the current class */
#define current_class_type scope_chain->class_type
/* When parsing a class definition, the access specifier most recently
given by the user, or, if no access specifier was given, the
default value appropriate for the kind of class (i.e., struct,
class, or union). */
#define current_access_specifier scope_chain->access_specifier
/* Pointer to the top of the language name stack. */
#define current_lang_base scope_chain->lang_base
#define current_lang_name scope_chain->lang_name
/* Parsing a function declarator leaves a list of parameter names
or a chain of parameter decls here. */
#define current_template_parms scope_chain->template_parms
#define processing_template_decl scope_chain->x_processing_template_decl
#define processing_specialization scope_chain->x_processing_specialization
#define processing_explicit_instantiation scope_chain->x_processing_explicit_instantiation
/* The cached class binding level, from the most recently exited
class, or NULL if none. */
#define previous_class_level scope_chain->x_previous_class_level
/* A list of private types mentioned, for deferred access checking. */
extern GTY(()) struct saved_scope *scope_chain;
struct cxx_int_tree_map GTY(())
{
unsigned int uid;
tree to;
};
extern unsigned int cxx_int_tree_map_hash (const void *);
extern int cxx_int_tree_map_eq (const void *, const void *);
/* Global state pertinent to the current function. */
struct language_function GTY(())
{
struct c_language_function base;
tree x_cdtor_label;
tree x_current_class_ptr;
tree x_current_class_ref;
tree x_eh_spec_block;
tree x_in_charge_parm;
tree x_vtt_parm;
tree x_return_value;
int returns_value;
int returns_null;
int returns_abnormally;
int in_function_try_handler;
int in_base_initializer;
/* True if this function can throw an exception. */
BOOL_BITFIELD can_throw : 1;
htab_t GTY((param_is(struct named_label_entry))) x_named_labels;
struct cp_binding_level *bindings;
VEC(tree,gc) *x_local_names;
htab_t GTY((param_is (struct cxx_int_tree_map))) extern_decl_map;
};
/* The current C++-specific per-function global variables. */
#define cp_function_chain (cfun->language)
/* In a constructor or destructor, the point at which all derived-class
construction or destruction has been done; i.e., just before a
constructor returns, or before any base-class destruction is done
in a destructor. */
#define cdtor_label cp_function_chain->x_cdtor_label
/* When we're processing a member function, current_class_ptr is the
PARM_DECL for the `this' pointer. The current_class_ref is an
expression for `*this'. */
#define current_class_ptr \
(cfun ? cp_function_chain->x_current_class_ptr : NULL_TREE)
#define current_class_ref \
(cfun ? cp_function_chain->x_current_class_ref : NULL_TREE)
/* The EH_SPEC_BLOCK for the exception-specifiers for the current
function, if any. */
#define current_eh_spec_block cp_function_chain->x_eh_spec_block
/* The `__in_chrg' parameter for the current function. Only used for
constructors and destructors. */
#define current_in_charge_parm cp_function_chain->x_in_charge_parm
/* The `__vtt_parm' parameter for the current function. Only used for
constructors and destructors. */
#define current_vtt_parm cp_function_chain->x_vtt_parm
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
#define current_function_returns_value cp_function_chain->returns_value
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
#define current_function_returns_null cp_function_chain->returns_null
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
#define current_function_returns_abnormally \
cp_function_chain->returns_abnormally
/* Nonzero if we are processing a base initializer. Zero elsewhere. */
#define in_base_initializer cp_function_chain->in_base_initializer
#define in_function_try_handler cp_function_chain->in_function_try_handler
/* Expression always returned from function, or error_mark_node
otherwise, for use by the automatic named return value optimization. */
#define current_function_return_value \
(cp_function_chain->x_return_value)
/* True if NAME is the IDENTIFIER_NODE for an overloaded "operator
new" or "operator delete". */
#define NEW_DELETE_OPNAME_P(NAME) \
((NAME) == ansi_opname (NEW_EXPR) \
|| (NAME) == ansi_opname (VEC_NEW_EXPR) \
|| (NAME) == ansi_opname (DELETE_EXPR) \
|| (NAME) == ansi_opname (VEC_DELETE_EXPR))
#define ansi_opname(CODE) \
(operator_name_info[(int) (CODE)].identifier)
#define ansi_assopname(CODE) \
(assignment_operator_name_info[(int) (CODE)].identifier)
/* True if NODE is an erroneous expression. */
#define error_operand_p(NODE) \
((NODE) == error_mark_node \
|| ((NODE) && TREE_TYPE ((NODE)) == error_mark_node))
/* C++ language-specific tree codes. */
#define DEFTREECODE(SYM, NAME, TYPE, LENGTH) SYM,
enum cplus_tree_code {
CP_DUMMY_TREE_CODE = LAST_C_TREE_CODE,
#include "cp-tree.def"
LAST_CPLUS_TREE_CODE
};
#undef DEFTREECODE
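/* Expansion sketch (illustrative only): with the one-line DEFTREECODE
   definition above, an entry in cp-tree.def such as

     DEFTREECODE (BASELINK, "baselink", tcc_exceptional, 0)

   contributes just the enumerator "BASELINK," to the enum body. The
   same .def file can be re-included elsewhere with a different
   DEFTREECODE definition to generate name strings or arity tables. */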
/* TRUE if a tree code represents a statement. */
extern bool statement_code_p[MAX_TREE_CODES];
#define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)]
enum languages { lang_c, lang_cplusplus, lang_java };
/* Macros to make error reporting functions' lives easier. */
#define TYPE_IDENTIFIER(NODE) (DECL_NAME (TYPE_NAME (NODE)))
#define TYPE_LINKAGE_IDENTIFIER(NODE) \
(TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE)))
#define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE)))
#define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE)))
/* Nonzero if NODE has no name for linkage purposes. */
#define TYPE_ANONYMOUS_P(NODE) \
(TAGGED_TYPE_P (NODE) && ANON_AGGRNAME_P (TYPE_LINKAGE_IDENTIFIER (NODE)))
/* The _DECL for this _TYPE. */
#define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))
/* Nonzero if T is a class (or struct or union) type. Also nonzero
for template type parameters, typename types, and instantiated
template template parameters. Despite its name,
this macro has nothing to do with the definition of aggregate given
in the standard. Think of this macro as MAYBE_CLASS_TYPE_P. Keep
these checks in ascending code order. */
#define IS_AGGR_TYPE(T) \
(TREE_CODE (T) == TEMPLATE_TYPE_PARM \
|| TREE_CODE (T) == TYPENAME_TYPE \
|| TREE_CODE (T) == TYPEOF_TYPE \
|| TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \
|| TYPE_LANG_FLAG_5 (T))
/* Set IS_AGGR_TYPE for T to VAL. T must be a class, struct, or
union type. */
#define SET_IS_AGGR_TYPE(T, VAL) \
(TYPE_LANG_FLAG_5 (T) = (VAL))
/* Nonzero if T is a class type. Zero for template type parameters,
typename types, and so forth. */
#define CLASS_TYPE_P(T) \
(IS_AGGR_TYPE_CODE (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T))
/* Keep these checks in ascending code order. */
#define IS_AGGR_TYPE_CODE(T) \
((T) == RECORD_TYPE || (T) == UNION_TYPE)
#define TAGGED_TYPE_P(T) \
(CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE)
#define IS_OVERLOAD_TYPE(T) TAGGED_TYPE_P (T)
/* True if this a "Java" type, defined in 'extern "Java"'. */
#define TYPE_FOR_JAVA(NODE) TYPE_LANG_FLAG_3 (NODE)
/* True if this type is dependent. This predicate is only valid if
TYPE_DEPENDENT_P_VALID is true. */
#define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE)
/* True if dependent_type_p has been called for this type, with the
result that TYPE_DEPENDENT_P is valid. */
#define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6(NODE)
/* Nonzero if this type is const-qualified. */
#define CP_TYPE_CONST_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0)
/* Nonzero if this type is volatile-qualified. */
#define CP_TYPE_VOLATILE_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0)
/* Nonzero if this type is restrict-qualified. */
#define CP_TYPE_RESTRICT_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0)
/* Nonzero if this type is const-qualified, but not
volatile-qualified. Other qualifiers are ignored. This macro is
used to test whether or not it is OK to bind an rvalue to a
reference. */
#define CP_TYPE_CONST_NON_VOLATILE_P(NODE) \
((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) \
== TYPE_QUAL_CONST)
#define FUNCTION_ARG_CHAIN(NODE) \
TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Given a FUNCTION_DECL, returns the first TREE_LIST out of TYPE_ARG_TYPES
which refers to a user-written parameter. */
#define FUNCTION_FIRST_USER_PARMTYPE(NODE) \
skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Similarly, but for DECL_ARGUMENTS. */
#define FUNCTION_FIRST_USER_PARM(NODE) \
skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE))
#define PROMOTES_TO_AGGR_TYPE(NODE, CODE) \
(((CODE) == TREE_CODE (NODE) \
&& IS_AGGR_TYPE (TREE_TYPE (NODE))) \
|| IS_AGGR_TYPE (NODE))
/* Nonzero iff TYPE is derived from PARENT. Ignores accessibility and
ambiguity issues. */
#define DERIVED_FROM_P(PARENT, TYPE) \
(lookup_base ((TYPE), (PARENT), ba_any, NULL) != NULL_TREE)
/* Nonzero iff TYPE is uniquely derived from PARENT. Ignores
accessibility. */
#define UNIQUELY_DERIVED_FROM_P(PARENT, TYPE) \
(lookup_base ((TYPE), (PARENT), ba_unique | ba_quiet, NULL) != NULL_TREE)
/* Nonzero iff TYPE is publicly & uniquely derived from PARENT. */
#define PUBLICLY_UNIQUELY_DERIVED_P(PARENT, TYPE) \
(lookup_base ((TYPE), (PARENT), ba_ignore_scope | ba_check | ba_quiet, \
NULL) != NULL_TREE)
/* Gives the visibility specification for a class type. */
#define CLASSTYPE_VISIBILITY(TYPE) \
DECL_VISIBILITY (TYPE_NAME (TYPE))
#define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE) \
DECL_VISIBILITY_SPECIFIED (TYPE_NAME (TYPE))
typedef struct tree_pair_s GTY (())
{
tree purpose;
tree value;
} tree_pair_s;
typedef tree_pair_s *tree_pair_p;
DEF_VEC_O (tree_pair_s);
DEF_VEC_ALLOC_O (tree_pair_s,gc);
/* These are a few header flags for 'struct lang_type'. Actually,
all but the first are used only for lang_type_class; they
are put in this structure to save space. */
struct lang_type_header GTY(())
{
BOOL_BITFIELD is_lang_type_class : 1;
BOOL_BITFIELD has_type_conversion : 1;
BOOL_BITFIELD has_init_ref : 1;
BOOL_BITFIELD has_default_ctor : 1;
BOOL_BITFIELD const_needs_init : 1;
BOOL_BITFIELD ref_needs_init : 1;
BOOL_BITFIELD has_const_assign_ref : 1;
BOOL_BITFIELD spare : 1;
};
/* This structure provides additional information above and beyond
what is provided in the ordinary tree_type. In the past, we used it
for the types of class types, template parameters types, typename
types, and so forth. However, there can be many (tens to hundreds
of thousands) of template parameter types in a compilation, and
there's no need for this additional information in that case.
Therefore, we now use this data structure only for class types.
In the past, it was thought that there would be relatively few
class types. However, in the presence of heavy use of templates,
many (i.e., thousands) of classes can easily be generated.
Therefore, we should endeavor to keep the size of this structure to
a minimum. */
struct lang_type_class GTY(())
{
struct lang_type_header h;
unsigned char align;
unsigned has_mutable : 1;
unsigned com_interface : 1;
unsigned non_pod_class : 1;
unsigned nearly_empty_p : 1;
unsigned user_align : 1;
unsigned has_assign_ref : 1;
unsigned has_new : 1;
unsigned has_array_new : 1;
unsigned gets_delete : 2;
unsigned interface_only : 1;
unsigned interface_unknown : 1;
unsigned contains_empty_class_p : 1;
unsigned anon_aggr : 1;
unsigned non_zero_init : 1;
unsigned empty_p : 1;
unsigned vec_new_uses_cookie : 1;
unsigned declared_class : 1;
unsigned diamond_shaped : 1;
unsigned repeated_base : 1;
unsigned being_defined : 1;
unsigned java_interface : 1;
unsigned debug_requested : 1;
unsigned fields_readonly : 1;
unsigned use_template : 2;
unsigned ptrmemfunc_flag : 1;
unsigned was_anonymous : 1;
unsigned lazy_default_ctor : 1;
unsigned lazy_copy_ctor : 1;
unsigned lazy_assignment_op : 1;
unsigned lazy_destructor : 1;
unsigned has_const_init_ref : 1;
unsigned has_complex_init_ref : 1;
unsigned has_complex_assign_ref : 1;
unsigned non_aggregate : 1;
/* APPLE LOCAL begin omit calls to empty destructors 5559195 */
unsigned has_nontrivial_destructor_body : 1;
unsigned destructor_nontrivial_because_of_base : 1;
unsigned destructor_triviality_final : 1;
/* APPLE LOCAL end omit calls to empty destructors 5559195 */
/* When adding a flag here, consider whether or not it ought to
apply to a template instance if it applies to the template. If
so, make sure to copy it in instantiate_class_template! */
/* There are some bits left to fill out a 32-bit word. Keep track
of this by updating the size of this bitfield whenever you add or
remove a flag. */
/* APPLE LOCAL begin omit calls to empty destructors 5559195 */
unsigned dummy : 10;
/* APPLE LOCAL end omit calls to empty destructors 5559195 */
tree primary_base;
VEC(tree_pair_s,gc) *vcall_indices;
tree vtables;
tree typeinfo_var;
VEC(tree,gc) *vbases;
binding_table nested_udts;
tree as_base;
VEC(tree,gc) *pure_virtuals;
tree friend_classes;
VEC(tree,gc) * GTY((reorder ("resort_type_method_vec"))) methods;
tree key_method;
tree decl_list;
tree template_info;
tree befriending_classes;
/* In a RECORD_TYPE, information specific to Objective-C++, such
as a list of adopted protocols or a pointer to a corresponding
@interface. See objc/objc-act.h for details. */
tree objc_info;
};
struct lang_type_ptrmem GTY(())
{
struct lang_type_header h;
tree record;
};
struct lang_type GTY(())
{
union lang_type_u
{
struct lang_type_header GTY((skip (""))) h;
struct lang_type_class GTY((tag ("1"))) c;
struct lang_type_ptrmem GTY((tag ("0"))) ptrmem;
} GTY((desc ("%h.h.is_lang_type_class"))) u;
};
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define LANG_TYPE_CLASS_CHECK(NODE) __extension__ \
({ struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \
if (! lt->u.h.is_lang_type_class) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.c; })
#define LANG_TYPE_PTRMEM_CHECK(NODE) __extension__ \
({ struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \
if (lt->u.h.is_lang_type_class) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.ptrmem; })
#else
#define LANG_TYPE_CLASS_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.c)
#define LANG_TYPE_PTRMEM_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.ptrmem)
#endif /* ENABLE_TREE_CHECKING */
/* Fields used for storing information before the class is defined.
After the class is defined, these fields hold other information. */
/* VEC(tree) of friends which were defined inline in this class
definition. */
#define CLASSTYPE_INLINE_FRIENDS(NODE) CLASSTYPE_PURE_VIRTUALS (NODE)
/* Nonzero for _CLASSTYPE means that operator delete is defined. */
#define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete)
#define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1)
/* Nonzero if `new NODE[x]' should cause the allocation of extra
storage to indicate how many array elements are in use. */
#define TYPE_VEC_NEW_USES_COOKIE(NODE) \
(CLASS_TYPE_P (NODE) \
&& LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie)
/* Nonzero means that this _CLASSTYPE node defines ways of converting
itself to other types. */
#define TYPE_HAS_CONVERSION(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.has_type_conversion)
/* Nonzero means that NODE (a class type) has a default constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor)
/* Nonzero means that NODE (a class type) has a copy constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_COPY_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor)
/* Nonzero means that NODE (a class type) has an assignment operator
-- but that it has not yet been declared. */
#define CLASSTYPE_LAZY_ASSIGNMENT_OP(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_assignment_op)
/* Nonzero means that NODE (a class type) has a destructor -- but that
it has not yet been declared. */
#define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor)
/* Nonzero means that this _CLASSTYPE node overloads operator=(X&). */
#define TYPE_HAS_ASSIGN_REF(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_assign_ref)
/* True iff the class type NODE has an "operator =" whose parameter
has a parameter of type "const X&". */
#define TYPE_HAS_CONST_ASSIGN_REF(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.has_const_assign_ref)
/* Nonzero means that this _CLASSTYPE node has an X(X&) constructor. */
#define TYPE_HAS_INIT_REF(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->h.has_init_ref)
#define TYPE_HAS_CONST_INIT_REF(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_const_init_ref)
/* Nonzero if this class defines an overloaded operator new. (An
operator new [] doesn't count.) */
#define TYPE_HAS_NEW_OPERATOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_new)
/* Nonzero if this class defines an overloaded operator new[]. */
#define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_array_new)
/* Nonzero means that this type is being defined. I.e., the left brace
starting the definition of this type has been seen. */
#define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined)
/* Mark bits for repeated base checks. */
#define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE))
/* Nonzero if the class NODE has multiple paths to the same (virtual)
base object. */
#define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \
(LANG_TYPE_CLASS_CHECK(NODE)->diamond_shaped)
/* Nonzero if the class NODE has multiple instances of the same base
type. */
#define CLASSTYPE_REPEATED_BASE_P(NODE) \
(LANG_TYPE_CLASS_CHECK(NODE)->repeated_base)
/* The member function with which the vtable will be emitted:
the first noninline non-pure-virtual member function. NULL_TREE
if there is no key function or if this is a class template. */
#define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method)
/* Vector of member functions defined in this class. Each element is
either a FUNCTION_DECL, a TEMPLATE_DECL, or an OVERLOAD. All
functions with the same name end up in the same slot. The first
two elements are for constructors, and destructors, respectively.
All template conversion operators to innermost template dependent
types are overloaded on the next slot, if they exist. Note, the
names for these functions will not all be the same. The
non-template conversion operators & templated conversions to
non-innermost template types are next, followed by ordinary member
functions. There may be empty entries at the end of the vector.
The conversion operators are unsorted. The ordinary member
functions are sorted, once the class is complete. */
#define CLASSTYPE_METHOD_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->methods)
/* For class templates, this is a TREE_LIST of all member data,
functions, types, and friends in the order of declaration.
The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend,
and the RECORD_TYPE for the class template otherwise. */
#define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list)
/* The slot in the CLASSTYPE_METHOD_VEC where constructors go. */
#define CLASSTYPE_CONSTRUCTOR_SLOT 0
/* The slot in the CLASSTYPE_METHOD_VEC where destructors go. */
#define CLASSTYPE_DESTRUCTOR_SLOT 1
/* The first slot in the CLASSTYPE_METHOD_VEC where conversion
operators can appear. */
#define CLASSTYPE_FIRST_CONVERSION_SLOT 2
/* A FUNCTION_DECL or OVERLOAD for the constructors for NODE. These
are the constructors that take an in-charge parameter. */
#define CLASSTYPE_CONSTRUCTORS(NODE) \
(VEC_index (tree, CLASSTYPE_METHOD_VEC (NODE), CLASSTYPE_CONSTRUCTOR_SLOT))
/* A FUNCTION_DECL for the destructor for NODE. These are the
destructors that take an in-charge parameter. If
CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL
until the destructor is created with lazily_declare_fn. */
#define CLASSTYPE_DESTRUCTORS(NODE) \
(CLASSTYPE_METHOD_VEC (NODE) \
? VEC_index (tree, CLASSTYPE_METHOD_VEC (NODE), CLASSTYPE_DESTRUCTOR_SLOT) \
: NULL_TREE)
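/* Usage sketch (illustrative only): fetch the in-charge constructors
   for a complete class type and walk them with the OVL_* accessors
   defined earlier; assumes the method vector has been populated. */
#if 0
static void
example_walk_ctors (tree type)
{
  tree ovl;
  for (ovl = CLASSTYPE_CONSTRUCTORS (type); ovl; ovl = OVL_NEXT (ovl))
    {
      tree ctor = OVL_CURRENT (ovl);
      /* ... inspect CTOR ... */
    }
}
#endif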
/* A dictionary of the nested user-defined-types (class-types, or enums)
found within this class. This table includes nested member class
templates. */
#define CLASSTYPE_NESTED_UTDS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->nested_udts)
/* Nonzero if NODE has a primary base class, i.e., a base class with
which it shares the virtual function table pointer. */
#define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \
(CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE)
/* If non-NULL, this is the binfo for the primary base class, i.e.,
the base class which contains the virtual function table pointer
for this class. */
#define CLASSTYPE_PRIMARY_BINFO(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->primary_base)
/* A vector of BINFOs for the direct and indirect virtual base classes
that this type uses in a post-order depth-first left-to-right
order. (In other words, these bases appear in the order that they
should be initialized.) */
#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases)
/* The type corresponding to NODE when NODE is used as a base class,
i.e., NODE without virtual base classes. */
#define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base)
/* True iff NODE is the CLASSTYPE_AS_BASE version of some type. */
#define IS_FAKE_BASE_TYPE(NODE) \
(TREE_CODE (NODE) == RECORD_TYPE \
&& TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE)) \
&& CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE))
/* These are the size and alignment of the type without its virtual
base classes, for when we use this type as a base itself. */
#define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE))
/* The alignment of NODE, without its virtual bases, in bytes. */
#define CLASSTYPE_ALIGN_UNIT(NODE) \
(CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT)
/* True if this is a Java interface type, declared with
'__attribute__ ((java_interface))'. */
#define TYPE_JAVA_INTERFACE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->java_interface)
/* A VEC(tree) of virtual functions which cannot be inherited by
derived classes. When deriving from this type, the derived
class must provide its own definition for each of these functions. */
#define CLASSTYPE_PURE_VIRTUALS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals)
/* Nonzero means that this type has an X() constructor. */
#define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.has_default_ctor)
/* Nonzero means that this type contains a mutable member. */
#define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable)
#define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE))
/* Nonzero means that this class type is a non-POD class. */
#define CLASSTYPE_NON_POD_P(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class)
/* Nonzero means that this class contains pod types whose default
initialization is not a zero initialization (namely, pointers to
data members). */
#define CLASSTYPE_NON_ZERO_INIT_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init)
/* Nonzero if this class is "empty" in the sense of the C++ ABI. */
#define CLASSTYPE_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->empty_p)
/* Nonzero if this class is "nearly empty", i.e., contains only a
virtual function table pointer. */
#define CLASSTYPE_NEARLY_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p)
/* Nonzero if this class contains an empty subobject. */
#define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p)
/* A list of class types of which this type is a friend. The
TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the
case of a template friend. */
#define CLASSTYPE_FRIEND_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->friend_classes)
/* A list of the classes which grant friendship to this class. */
#define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes)
/* Say whether this node was declared as a "class" or a "struct". */
#define CLASSTYPE_DECLARED_CLASS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->declared_class)
/* Nonzero if this class has const members
which have no specified initialization. */
#define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init : 0)
#define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init = (VALUE))
/* Nonzero if this class has ref members
which have no specified initialization. */
#define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init : 0)
#define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init = (VALUE))
/* Nonzero if this class is included from a header file which employs
`#pragma interface', and it is not included in its implementation file. */
#define CLASSTYPE_INTERFACE_ONLY(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_only)
/* True if we have already determined whether or not vtables, VTTs,
typeinfo, and other similar per-class data should be emitted in
this translation unit. This flag does not indicate whether or not
these items should be emitted; it only indicates that we know one
way or the other. */
#define CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0)
/* The opposite of CLASSTYPE_INTERFACE_KNOWN. */
#define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown)
#define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X))
#define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1)
#define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0)
/* Nonzero if a _DECL node requires us to output debug info for this class. */
#define CLASSTYPE_DEBUG_REQUESTED(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->debug_requested)
/* Additional macros for inheritance information. */
/* Nonzero means that this class is on a path leading to a new vtable. */
#define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE)
/* Nonzero means B (a BINFO) has its own vtable. Any copies will not
have this flag set. */
#define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B))
/* Compare a BINFO_TYPE with another type for equality. For a binfo,
this is functionally equivalent to using same_type_p, but
measurably faster. At least one of the arguments must be a
BINFO_TYPE. The other can be a BINFO_TYPE or a regular type. If
BINFO_TYPE(T) ever stops being the main variant of the class the
binfo is for, this macro must change. */
#define SAME_BINFO_TYPE_P(A, B) ((A) == (B))
/* Any subobject that needs a new vtable must have a vptr and must not
be a non-virtual primary base (since it would then use the vtable from a
derived class and never become non-primary.) */
#define SET_BINFO_NEW_VTABLE_MARKED(B) \
(BINFO_NEW_VTABLE_MARKED (B) = 1, \
gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)), \
gcc_assert (TYPE_VFIELD (BINFO_TYPE (B))))
/* Nonzero if this binfo is for a dependent base - one that should not
be searched. */
#define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE)
/* Nonzero if this binfo has lost its primary base binfo (because that
is a nearly-empty virtual base that has been taken by some other
base in the complete hierarchy.) */
#define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE)
/* Nonzero if this BINFO is a primary base class. */
#define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5(NODE)
/* Used by various search routines. */
#define IDENTIFIER_MARKED(NODE) TREE_LANG_FLAG_0 (NODE)
/* A VEC(tree_pair_s) of the vcall indices associated with the class
NODE. The PURPOSE of each element is a FUNCTION_DECL for a virtual
function. The VALUE is the index into the virtual table where the
vcall offset for that function is stored, when NODE is a virtual
base. */
#define CLASSTYPE_VCALL_INDICES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices)
/* The various vtables for the class NODE. The primary vtable will be
first, followed by the construction vtables and VTT, if any. */
#define CLASSTYPE_VTABLES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vtables)
/* The std::type_info variable representing this class, or NULL if no
such variable has been created. This field is only set for the
TYPE_MAIN_VARIANT of the class. */
#define CLASSTYPE_TYPEINFO_VAR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var)
/* Accessor macros for the BINFO_VIRTUALS list. */
/* The number of bytes by which to adjust the `this' pointer when
calling this virtual function. Subtract this value from the this
pointer. Always non-NULL, might be constant zero though. */
#define BV_DELTA(NODE) (TREE_PURPOSE (NODE))
/* If non-NULL, the vtable index at which to find the vcall offset
when calling this virtual function. Add the value at that vtable
index to the this pointer. */
#define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE))
/* The function to call. */
#define BV_FN(NODE) (TREE_VALUE (NODE))
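/* Usage sketch (illustrative only): walking the BINFO_VIRTUALS list
   with the accessors above. Each node is a TREE_LIST entry, so the
   iteration steps via TREE_CHAIN. */
#if 0
static void
example_walk_virtuals (tree binfo)
{
  tree v;
  for (v = BINFO_VIRTUALS (binfo); v; v = TREE_CHAIN (v))
    {
      tree delta = BV_DELTA (v);        /* `this' adjustment */
      tree vcall = BV_VCALL_INDEX (v);  /* may be NULL_TREE */
      tree fn = BV_FN (v);              /* the virtual function */
      /* ... */
    }
}
#endif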
/* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that
this type can raise. Each TREE_VALUE is a _TYPE. The TREE_VALUE
will be NULL_TREE to indicate a throw specification of `()', or
no exceptions allowed. */
#define TYPE_RAISES_EXCEPTIONS(NODE) TYPE_LANG_SLOT_1 (NODE)
/* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared `throw()'. */
#define TYPE_NOTHROW_P(NODE) \
(TYPE_RAISES_EXCEPTIONS (NODE) \
&& TREE_VALUE (TYPE_RAISES_EXCEPTIONS (NODE)) == NULL_TREE)
/* The binding level associated with the namespace. */
#define NAMESPACE_LEVEL(NODE) \
(DECL_LANG_SPECIFIC (NODE)->decl_flags.u.level)
/* If a DECL has DECL_LANG_SPECIFIC, it is either a lang_decl_flags or
a lang_decl (which has lang_decl_flags as its initial prefix).
This macro is nonzero for tree nodes whose DECL_LANG_SPECIFIC is
the full lang_decl, and not just lang_decl_flags. Keep these
checks in ascending code order. */
#define CAN_HAVE_FULL_LANG_DECL_P(NODE) \
(!(TREE_CODE (NODE) == FIELD_DECL \
|| TREE_CODE (NODE) == VAR_DECL \
|| TREE_CODE (NODE) == CONST_DECL \
|| TREE_CODE (NODE) == USING_DECL))
struct lang_decl_flags GTY(())
{
ENUM_BITFIELD(languages) language : 4;
unsigned global_ctor_p : 1;
unsigned global_dtor_p : 1;
unsigned anticipated_p : 1;
unsigned template_conv_p : 1;
unsigned operator_attr : 1;
unsigned constructor_attr : 1;
unsigned destructor_attr : 1;
unsigned friend_attr : 1;
unsigned static_function : 1;
unsigned pure_virtual : 1;
unsigned has_in_charge_parm_p : 1;
unsigned has_vtt_parm_p : 1;
unsigned deferred : 1;
unsigned use_template : 2;
unsigned nonconverting : 1;
unsigned not_really_extern : 1;
unsigned initialized_in_class : 1;
unsigned assignment_operator_p : 1;
unsigned u1sel : 1;
unsigned u2sel : 1;
unsigned can_be_full : 1;
unsigned thunk_p : 1;
unsigned this_thunk_p : 1;
unsigned repo_available_p : 1;
unsigned hidden_friend_p : 1;
unsigned threadprivate_p : 1;
/* One unused bit. */
union lang_decl_u {
/* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
THUNK_ALIAS.
In a FUNCTION_DECL for which DECL_THUNK_P does not hold,
VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is
DECL_TEMPLATE_INFO. */
tree GTY ((tag ("0"))) template_info;
/* In a NAMESPACE_DECL, this is NAMESPACE_LEVEL. */
struct cp_binding_level * GTY ((tag ("1"))) level;
} GTY ((desc ("%1.u1sel"))) u;
union lang_decl_u2 {
/* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
THUNK_VIRTUAL_OFFSET.
Otherwise this is DECL_ACCESS. */
tree GTY ((tag ("0"))) access;
/* For VAR_DECL in function, this is DECL_DISCRIMINATOR. */
int GTY ((tag ("1"))) discriminator;
} GTY ((desc ("%1.u2sel"))) u2;
};
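/* A sketch of how the discriminators above select union members: in a
NAMESPACE_DECL, u1sel is 1 and u.level holds the binding level (see
NAMESPACE_LEVEL above); otherwise u1sel is 0 and u.template_info
holds DECL_TEMPLATE_INFO or THUNK_ALIAS. Likewise, u2sel selects
between u2.access (0) and u2.discriminator (1). */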
/* sorted_fields is sorted based on a pointer, so we need to be able
to resort it if pointers get rearranged. */
struct lang_decl GTY(())
{
struct lang_decl_flags decl_flags;
union lang_decl_u4
{
struct full_lang_decl
{
/* In an overloaded operator, this is the value of
DECL_OVERLOADED_OPERATOR_P. */
ENUM_BITFIELD (tree_code) operator_code : 8;
unsigned u3sel : 1;
unsigned pending_inline_p : 1;
unsigned spare : 22;
/* For a non-thunk function decl, this is a tree list of
friendly classes. For a thunk function decl, it is the
thunked to function decl. */
tree befriending_classes;
/* For a non-virtual FUNCTION_DECL, this is
DECL_FRIEND_CONTEXT. For a virtual FUNCTION_DECL for which
DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS. Both
this pointer and result pointer adjusting thunks are
chained here. This pointer thunks to return pointer thunks
will be chained on the return pointer thunk. */
tree context;
union lang_decl_u5
{
/* In a non-thunk FUNCTION_DECL or TEMPLATE_DECL, this is
DECL_CLONED_FUNCTION. */
tree GTY ((tag ("0"))) cloned_function;
/* In a FUNCTION_DECL for which THUNK_P holds this is the
THUNK_FIXED_OFFSET. */
HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset;
} GTY ((desc ("%0.decl_flags.thunk_p"))) u5;
union lang_decl_u3
{
struct sorted_fields_type * GTY ((tag ("0"), reorder ("resort_sorted_fields")))
sorted_fields;
struct cp_token_cache * GTY ((tag ("2"))) pending_inline_info;
struct language_function * GTY ((tag ("1")))
saved_language_function;
} GTY ((desc ("%1.u3sel + %1.pending_inline_p"))) u;
} GTY ((tag ("1"))) f;
} GTY ((desc ("%1.decl_flags.can_be_full"))) u;
};
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define LANG_DECL_U2_CHECK(NODE, TF) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (lt->decl_flags.u2sel != TF) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->decl_flags.u2; })
#else
#define LANG_DECL_U2_CHECK(NODE, TF) \
(&DECL_LANG_SPECIFIC (NODE)->decl_flags.u2)
#endif /* ENABLE_TREE_CHECKING */
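/* For example, DECL_ACCESS below expands to
     LANG_DECL_U2_CHECK (NODE, 0)->access
   so that, with tree checking enabled, reading an access out of a slot
   whose u2sel says it holds a discriminator aborts via
   lang_check_failed. */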
/* For a FUNCTION_DECL or a VAR_DECL, the language linkage for the
declaration. Some entities (like a member function in a local
class, or a local variable) do not have linkage at all, and this
macro should not be used in those cases.
Implementation note: A FUNCTION_DECL without DECL_LANG_SPECIFIC was
created by language-independent code, and has C linkage. Most
VAR_DECLs have C++ linkage, and do not have DECL_LANG_SPECIFIC, but
we do create DECL_LANG_SPECIFIC for variables with non-C++ linkage. */
#define DECL_LANGUAGE(NODE) \
(DECL_LANG_SPECIFIC (NODE) \
? DECL_LANG_SPECIFIC (NODE)->decl_flags.language \
: (TREE_CODE (NODE) == FUNCTION_DECL \
? lang_c : lang_cplusplus))
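/* For example, given the illustrative declaration
     extern "C" void f ();
   DECL_LANGUAGE for `f' is lang_c and DECL_EXTERN_C_P (defined below)
   holds, while an ordinary C++ function declared without a linkage
   specification has DECL_LANGUAGE lang_cplusplus. */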
/* Set the language linkage for NODE to LANGUAGE. */
#define SET_DECL_LANGUAGE(NODE, LANGUAGE) \
(DECL_LANG_SPECIFIC (NODE)->decl_flags.language = (LANGUAGE))
/* For FUNCTION_DECLs: nonzero means that this function is a constructor. */
#define DECL_CONSTRUCTOR_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->decl_flags.constructor_attr)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete
object. */
#define DECL_COMPLETE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == complete_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base
object. */
#define DECL_BASE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == base_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the
specialized in-charge constructor or the specialized not-in-charge
constructor. */
#define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && !DECL_CLONED_FUNCTION_P (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor. */
#define DECL_COPY_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0)
/* Nonzero if NODE is a destructor. */
#define DECL_DESTRUCTOR_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->decl_flags.destructor_attr)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the
specialized in-charge destructor, the in-charge deleting destructor,
or the base destructor. */
#define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) && !DECL_CLONED_FUNCTION_P (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object. */
#define DECL_COMPLETE_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == complete_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base
object. */
#define DECL_BASE_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == base_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object that deletes the object after it has been destroyed. */
#define DECL_DELETING_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == deleting_dtor_identifier)
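/* A sketch of how these predicates partition the destructors: for
     struct B { virtual ~B (); };
   the front end emits a complete destructor, a base destructor and
   (because the destructor is virtual) a deleting destructor,
   distinguished only by DECL_NAME as tested above; the maybe-in-charge
   destructor is the uncloned original. */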
/* Nonzero if NODE (a FUNCTION_DECL) is a cloned constructor or
destructor. */
#define DECL_CLONED_FUNCTION_P(NODE) \
((TREE_CODE (NODE) == FUNCTION_DECL \
|| TREE_CODE (NODE) == TEMPLATE_DECL) \
&& DECL_LANG_SPECIFIC (NODE) \
&& !DECL_LANG_SPECIFIC (NODE)->decl_flags.thunk_p \
&& DECL_CLONED_FUNCTION (NODE) != NULL_TREE)
/* If DECL_CLONED_FUNCTION_P holds, this is the function that was
cloned. */
#define DECL_CLONED_FUNCTION(NODE) \
(DECL_LANG_SPECIFIC (NON_THUNK_FUNCTION_CHECK(NODE))->u.f.u5.cloned_function)
/* Perform an action for each clone of FN, if FN is a function with
clones. This macro should be used like:
FOR_EACH_CLONE (clone, fn)
{ ... }
*/
#define FOR_EACH_CLONE(CLONE, FN) \
if (TREE_CODE (FN) == FUNCTION_DECL \
&& (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (FN) \
|| DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (FN))) \
for (CLONE = TREE_CHAIN (FN); \
CLONE && DECL_CLONED_FUNCTION_P (CLONE); \
CLONE = TREE_CHAIN (CLONE))
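/* A minimal usage sketch, counting the clones of FN:
     tree clone;
     int n = 0;
     FOR_EACH_CLONE (clone, fn)
       n++;
   This relies on the clones immediately following FN on its
   TREE_CHAIN, which is also what terminates the loop above. */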
/* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS. */
#define DECL_DISCRIMINATOR_P(NODE) \
(TREE_CODE (NODE) == VAR_DECL \
&& DECL_FUNCTION_SCOPE_P (NODE))
/* Discriminator for name mangling. */
#define DECL_DISCRIMINATOR(NODE) (LANG_DECL_U2_CHECK (NODE, 1)->discriminator)
/* Nonzero if the VTT parm has been added to NODE. */
#define DECL_HAS_VTT_PARM_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->decl_flags.has_vtt_parm_p)
/* Nonzero if NODE is a FUNCTION_DECL for which a VTT parameter is
required. */
#define DECL_NEEDS_VTT_PARM_P(NODE) \
(CLASSTYPE_VBASECLASSES (DECL_CONTEXT (NODE)) \
&& (DECL_BASE_CONSTRUCTOR_P (NODE) \
|| DECL_BASE_DESTRUCTOR_P (NODE)))
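/* For example, given the illustrative hierarchy
     struct V {};
     struct A : virtual V { A (); };
   the base constructor of `A' needs a VTT parameter, since `A' has a
   virtual base; the complete constructor does not. */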
/* Nonzero if NODE is a user-defined conversion operator. */
#define DECL_CONV_FN_P(NODE) \
(DECL_NAME (NODE) && IDENTIFIER_TYPENAME_P (DECL_NAME (NODE)))
/* If FN is a conversion operator, the type to which it converts.
Otherwise, NULL_TREE. */
#define DECL_CONV_FN_TYPE(FN) \
(DECL_CONV_FN_P (FN) ? TREE_TYPE (DECL_NAME (FN)) : NULL_TREE)
/* Nonzero if NODE, which is a TEMPLATE_DECL, is a template
conversion operator to a type dependent on the innermost template
args. */
#define DECL_TEMPLATE_CONV_FN_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->decl_flags.template_conv_p)
/* Set the overloaded operator code for NODE to CODE. */
#define SET_OVERLOADED_OPERATOR_CODE(NODE, CODE) \
(DECL_LANG_SPECIFIC (NODE)->u.f.operator_code = (CODE))
/* If NODE is an overloaded operator, then this returns the TREE_CODE
associated with the overloaded operator.
DECL_ASSIGNMENT_OPERATOR_P must also be checked to determine
whether or not NODE is an assignment operator. If NODE is not an
overloaded operator, ERROR_MARK is returned. Since the numerical
value of ERROR_MARK is zero, this macro can be used as a predicate
to test whether or not NODE is an overloaded operator. */
#define DECL_OVERLOADED_OPERATOR_P(NODE) \
(IDENTIFIER_OPNAME_P (DECL_NAME (NODE)) \
? DECL_LANG_SPECIFIC (NODE)->u.f.operator_code : ERROR_MARK)
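/* For example,
     DECL_OVERLOADED_OPERATOR_P (fndecl) == PLUS_EXPR
   tests whether `fndecl' is operator+, while, because ERROR_MARK is
   zero,
     if (DECL_OVERLOADED_OPERATOR_P (fndecl)) ...
   tests whether it is any overloaded operator at all. */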
/* Nonzero if NODE is an assignment operator. */
#define DECL_ASSIGNMENT_OPERATOR_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->decl_flags.assignment_operator_p)
/* For FUNCTION_DECLs: nonzero means that this function is a
constructor or a destructor with an extra in-charge parameter to
control whether or not virtual bases are constructed. */
#define DECL_HAS_IN_CHARGE_PARM_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->decl_flags.has_in_charge_parm_p)
/* Nonzero if DECL is a declaration of __builtin_constant_p. */
#define DECL_IS_BUILTIN_CONSTANT_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL \
&& DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P)
/* Nonzero for _DECL means that this decl appears (or will appear) as
a member in a RECORD_TYPE or UNION_TYPE node. It is also for
detecting circularity in case members are multiply defined. In the
case of a VAR_DECL, it is also used to determine how program storage
should be allocated. */
#define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE))
/* Nonzero for a VAR_DECL means that the variable's initialization (if
any) has been processed. (In general, DECL_INITIALIZED_P is
!DECL_EXTERNAL, but static data members may be initialized even if
not defined.) */
#define DECL_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL iff an explicit initializer was provided. */
#define DECL_NONTRIVIALLY_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_3 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL that was initialized with a
constant-expression. */
#define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \
(TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL that can be used in an integral constant
expression.
[expr.const]
An integral constant-expression can only involve ... const
variables of static or enumeration types initialized with
constant expressions ...
The standard does not require that the expression be non-volatile.
G++ implements the proposed correction in DR 457. */
#define DECL_INTEGRAL_CONSTANT_VAR_P(NODE) \
(TREE_CODE (NODE) == VAR_DECL \
&& CP_TYPE_CONST_NON_VOLATILE_P (TREE_TYPE (NODE)) \
&& INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (NODE)) \
&& DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (NODE))
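/* For example:
     const int i = 7;           usable in a constant-expression
     const volatile int v = 7;  fails CP_TYPE_CONST_NON_VOLATILE_P
     const double d = 7.0;      not integral or enumeration
   following the three conditions tested above. */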
/* Nonzero if the DECL was initialized in the class definition itself,
rather than outside the class. This is used for both static member
VAR_DECLs and FUNCTION_DECLs that are defined in the class. */
#define DECL_INITIALIZED_IN_CLASS_P(DECL) \
(DECL_LANG_SPECIFIC (DECL)->decl_flags.initialized_in_class)
/* Nonzero for DECL means that this decl is just a friend declaration,
and should not be added to the list of members for this class. */
#define DECL_FRIEND_P(NODE) (DECL_LANG_SPECIFIC (NODE)->decl_flags.friend_attr)
/* A TREE_LIST of the types which have befriended this FUNCTION_DECL. */
#define DECL_BEFRIENDING_CLASSES(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.f.befriending_classes)
/* Nonzero for FUNCTION_DECL means that this decl is a static
member function. */
#define DECL_STATIC_FUNCTION_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->decl_flags.static_function)
/* Nonzero for FUNCTION_DECL means that this decl is a non-static
member function. */
#define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \
(TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)
/* Nonzero for FUNCTION_DECL means that this decl is a member function
(static or non-static). */
#define DECL_FUNCTION_MEMBER_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as const X *const. */
#define DECL_CONST_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as volatile X *const. */
#define DECL_VOLATILE_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for a DECL means that this member is a non-static member. */
#define DECL_NONSTATIC_MEMBER_P(NODE) \
((TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE)) \
|| TREE_CODE (NODE) == FIELD_DECL)
/* Nonzero for _DECL means that this member object type
is mutable. */
#define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (NODE))
/* Nonzero for _DECL means that this constructor is a non-converting
constructor. */
#define DECL_NONCONVERTING_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->decl_flags.nonconverting)
/* Nonzero for FUNCTION_DECL means that this member function is a pure
virtual function. */
#define DECL_PURE_VIRTUAL_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->decl_flags.pure_virtual)
/* True (in a FUNCTION_DECL) if NODE is a virtual function that is an
invalid overrider for a function from a base class. Once we have
complained about an invalid overrider we avoid complaining about it
again. */
#define DECL_INVALID_OVERRIDER_P(NODE) \
(DECL_LANG_FLAG_4 (NODE))
/* The thunks associated with NODE, a FUNCTION_DECL. */
#define DECL_THUNKS(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.f.context)
/* Nonzero if NODE is a thunk, rather than an ordinary function. */
#define DECL_THUNK_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_LANG_SPECIFIC (NODE) \
&& DECL_LANG_SPECIFIC (NODE)->decl_flags.thunk_p)
/* Set DECL_THUNK_P for node. */
#define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING) \
(DECL_LANG_SPECIFIC (NODE)->decl_flags.thunk_p = 1, \
DECL_LANG_SPECIFIC (NODE)->u.f.u3sel = 1, \
DECL_LANG_SPECIFIC (NODE)->decl_flags.this_thunk_p = (THIS_ADJUSTING))
/* Nonzero if NODE is a this pointer adjusting thunk. */
#define DECL_THIS_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && DECL_LANG_SPECIFIC (NODE)->decl_flags.this_thunk_p)
/* Nonzero if NODE is a result pointer adjusting thunk. */
#define DECL_RESULT_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && !DECL_LANG_SPECIFIC (NODE)->decl_flags.this_thunk_p)
/* Nonzero if NODE is a FUNCTION_DECL, but not a thunk. */
#define DECL_NON_THUNK_FUNCTION_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE))
/* Nonzero if NODE is `extern "C"'. */
#define DECL_EXTERN_C_P(NODE) \
(DECL_LANGUAGE (NODE) == lang_c)
/* Nonzero if NODE is an `extern "C"' function. */
#define DECL_EXTERN_C_FUNCTION_P(NODE) \
(DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE))
/* True iff DECL is an entity with vague linkage whose definition is
available in this translation unit. */
#define DECL_REPO_AVAILABLE_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->decl_flags.repo_available_p)
/* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a
template function. */
#define DECL_PRETTY_FUNCTION_P(NODE) \
(TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE)))
/* The _TYPE context in which this _DECL appears. This field holds the
class where a virtual function instance is actually defined. */
#define DECL_CLASS_CONTEXT(NODE) \
(DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE)
/* For a non-member friend function, the class (if any) in which this
friend was defined. For example, given:
struct S { friend void f (); };
the DECL_FRIEND_CONTEXT for `f' will be `S'. */
#define DECL_FRIEND_CONTEXT(NODE) \
((DECL_FRIEND_P (NODE) && !DECL_FUNCTION_MEMBER_P (NODE)) \
? DECL_LANG_SPECIFIC (NODE)->u.f.context \
: NULL_TREE)
/* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT. */
#define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \
(DECL_LANG_SPECIFIC (NODE)->u.f.context = (CONTEXT))
/* NULL_TREE in DECL_CONTEXT represents the global namespace. */
#define CP_DECL_CONTEXT(NODE) \
(DECL_CONTEXT (NODE) ? DECL_CONTEXT (NODE) : global_namespace)
#define CP_TYPE_CONTEXT(NODE) \
(TYPE_CONTEXT (NODE) ? TYPE_CONTEXT (NODE) : global_namespace)
#define FROB_CONTEXT(NODE) ((NODE) == global_namespace ? NULL_TREE : (NODE))
/* 1 iff NODE has namespace scope, including the global namespace. */
#define DECL_NAMESPACE_SCOPE_P(NODE) \
(!DECL_TEMPLATE_PARM_P (NODE) \
&& TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL)
#define TYPE_NAMESPACE_SCOPE_P(NODE) \
(TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL)
/* 1 iff NODE is a class member. */
#define DECL_CLASS_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE)))
#define TYPE_CLASS_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE)))
/* 1 iff NODE is function-local. */
#define DECL_FUNCTION_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) \
&& TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL)
#define TYPE_FUNCTION_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) \
&& TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL)
/* 1 iff VAR_DECL node NODE is a type-info decl. This flag is set for
both the primary typeinfo object and the associated NTBS name. */
#define DECL_TINFO_P(NODE) TREE_LANG_FLAG_4 (VAR_DECL_CHECK (NODE))
/* 1 iff VAR_DECL node NODE is virtual table or VTT. */
#define DECL_VTABLE_OR_VTT_P(NODE) TREE_LANG_FLAG_5 (VAR_DECL_CHECK (NODE))
/* Returns 1 iff VAR_DECL is a construction virtual table.
DECL_VTABLE_OR_VTT_P will be true in this case and must be checked
before using this macro. */
#define DECL_CONSTRUCTION_VTABLE_P(NODE) \
TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE))
/* 1 iff NODE is function-local, but for types. */
#define LOCAL_CLASS_P(NODE) \
(decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE)
/* For a NAMESPACE_DECL: the list of using namespace directives
The PURPOSE is the used namespace, the value is the namespace
that is the common ancestor. */
#define DECL_NAMESPACE_USING(NODE) DECL_VINDEX (NAMESPACE_DECL_CHECK (NODE))
/* In a NAMESPACE_DECL, the DECL_INITIAL is used to record all users
of a namespace, to record the transitive closure of using namespace. */
#define DECL_NAMESPACE_USERS(NODE) DECL_INITIAL (NAMESPACE_DECL_CHECK (NODE))
/* In a NAMESPACE_DECL, the list of namespaces which have associated
themselves with this one. */
#define DECL_NAMESPACE_ASSOCIATIONS(NODE) \
(NAMESPACE_DECL_CHECK (NODE)->decl_non_common.saved_tree)
/* In a NAMESPACE_DECL, points to the original namespace if this is
a namespace alias. */
#define DECL_NAMESPACE_ALIAS(NODE) \
DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE))
#define ORIGINAL_NAMESPACE(NODE) \
(DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE))
/* Nonzero if NODE is the std namespace. */
#define DECL_NAMESPACE_STD_P(NODE) \
(TREE_CODE (NODE) == NAMESPACE_DECL \
&& CP_DECL_CONTEXT (NODE) == global_namespace \
&& DECL_NAME (NODE) == std_identifier)
/* In a TREE_LIST concatenating using directives, indicate indirect
directives. */
#define TREE_INDIRECT_USING(NODE) (TREE_LIST_CHECK (NODE)->common.lang_flag_0)
extern tree decl_shadowed_for_var_lookup (tree);
extern void decl_shadowed_for_var_insert (tree, tree);
/* Nonzero if this is a using decl for a dependent scope. */
#define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE))
/* The scope named in a using decl. */
#define USING_DECL_SCOPE(NODE) TREE_TYPE (USING_DECL_CHECK (NODE))
/* The decls named by a using decl. */
#define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE))
/* In a VAR_DECL, true if we have a shadowed local variable
in the shadowed var table for this VAR_DECL. */
#define DECL_HAS_SHADOWED_FOR_VAR_P(NODE) \
(VAR_DECL_CHECK (NODE)->decl_with_vis.shadowed_for_var_p)
/* In a VAR_DECL for a variable declared in a for statement,
this is the shadowed (local) variable. */
#define DECL_SHADOWED_FOR_VAR(NODE) \
(DECL_HAS_SHADOWED_FOR_VAR_P(NODE) ? decl_shadowed_for_var_lookup (NODE) : NULL)
#define SET_DECL_SHADOWED_FOR_VAR(NODE, VAL) \
(decl_shadowed_for_var_insert (NODE, VAL))
/* In a FUNCTION_DECL, this is nonzero if this function was defined in
the class definition. We have saved away the text of the function,
but have not yet processed it. */
#define DECL_PENDING_INLINE_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.f.pending_inline_p)
/* If DECL_PENDING_INLINE_P holds, this is the saved text of the
function. */
#define DECL_PENDING_INLINE_INFO(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.f.u.pending_inline_info)
/* For a TYPE_DECL: if this structure has many fields, we'll sort them
and put them into a TREE_VEC. */
#define DECL_SORTED_FIELDS(NODE) \
(DECL_LANG_SPECIFIC (TYPE_DECL_CHECK (NODE))->u.f.u.sorted_fields)
/* True if on the deferred_fns (see decl2.c) list. */
#define DECL_DEFERRED_FN(DECL) \
(DECL_LANG_SPECIFIC (DECL)->decl_flags.deferred)
/* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL or
TEMPLATE_DECL, the entity is either a template specialization (if
DECL_USE_TEMPLATE is non-zero) or the abstract instance of the
template itself.
In either case, DECL_TEMPLATE_INFO is a TREE_LIST, whose
TREE_PURPOSE is the TEMPLATE_DECL of which this entity is a
specialization or abstract instance. The TREE_VALUE is the
template arguments used to specialize the template.
Consider:
template <typename T> struct S { friend void f(T) {} };
In this case, S<int>::f is, from the point of view of the compiler,
an instantiation of a template -- but, from the point of view of
the language, each instantiation of S results in a wholly unrelated
global function f. In this case, DECL_TEMPLATE_INFO for S<int>::f
will be non-NULL, but DECL_USE_TEMPLATE will be zero. */
#define DECL_TEMPLATE_INFO(NODE) \
(DECL_LANG_SPECIFIC (VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK (NODE)) \
->decl_flags.u.template_info)
/* For a VAR_DECL, indicates that the variable is actually a
non-static data member of anonymous union that has been promoted to
variable status. */
#define DECL_ANON_UNION_VAR_P(NODE) \
(DECL_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)))
/* Template information for a RECORD_TYPE or UNION_TYPE. */
#define CLASSTYPE_TEMPLATE_INFO(NODE) \
(LANG_TYPE_CLASS_CHECK (RECORD_OR_UNION_CHECK (NODE))->template_info)
/* Template information for an ENUMERAL_TYPE. Although an enumeration may
not be a primary template, it may be declared within the scope of a
primary template and the enumeration constants may depend on
non-type template parameters. */
#define ENUM_TEMPLATE_INFO(NODE) \
(TYPE_LANG_SLOT_1 (ENUMERAL_TYPE_CHECK (NODE)))
/* Template information for a template template parameter. */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO(NODE) \
(LANG_TYPE_CLASS_CHECK (BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK (NODE)) \
->template_info)
/* Template information for an ENUMERAL_, RECORD_, or UNION_TYPE. */
#define TYPE_TEMPLATE_INFO(NODE) \
(TREE_CODE (NODE) == ENUMERAL_TYPE \
? ENUM_TEMPLATE_INFO (NODE) : \
(TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM \
? TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (NODE) : \
(TYPE_LANG_SPECIFIC (NODE) \
? CLASSTYPE_TEMPLATE_INFO (NODE) \
: NULL_TREE)))
/* Set the template information for an ENUMERAL_, RECORD_, or
UNION_TYPE to VAL. */
#define SET_TYPE_TEMPLATE_INFO(NODE, VAL) \
(TREE_CODE (NODE) == ENUMERAL_TYPE \
? (ENUM_TEMPLATE_INFO (NODE) = (VAL)) \
: (CLASSTYPE_TEMPLATE_INFO (NODE) = (VAL)))
#define TI_TEMPLATE(NODE) (TREE_PURPOSE (NODE))
#define TI_ARGS(NODE) (TREE_VALUE (NODE))
#define TI_PENDING_TEMPLATE_FLAG(NODE) TREE_LANG_FLAG_1 (NODE)
/* We use TREE_VECs to hold template arguments. If there is only one
level of template arguments, then the TREE_VEC contains the
arguments directly. If there is more than one level of template
arguments, then each entry in the TREE_VEC is itself a TREE_VEC,
containing the template arguments for a single level. The first
entry in the outer TREE_VEC is the outermost level of template
parameters; the last is the innermost.
It is incorrect to ever form a template argument vector containing
only one level of arguments, but which is a TREE_VEC containing as
its only entry the TREE_VEC for that level. */
/* Nonzero if the template argument vector is actually a vector of vectors,
rather than just a vector. */
#define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE) \
(NODE && TREE_VEC_ELT (NODE, 0) \
&& TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC)
/* The depth of a template argument vector. When called directly by
the parser, we use a TREE_LIST rather than a TREE_VEC to represent
template arguments. In fact, we may even see NULL_TREE if there
are no template arguments. In both of those cases, there is only
one level of template arguments. */
#define TMPL_ARGS_DEPTH(NODE) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) : 1)
/* The LEVELth level of the template ARGS. The outermost level of
args is level 1, not level 0. */
#define TMPL_ARGS_LEVEL(ARGS, LEVEL) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS) \
? TREE_VEC_ELT (ARGS, (LEVEL) - 1) : (ARGS))
/* Set the LEVELth level of the template ARGS to VAL. This macro does
not work with single-level argument vectors. */
#define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL) \
(TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL))
/* Accesses the IDXth parameter in the LEVELth level of the ARGS. */
#define TMPL_ARG(ARGS, LEVEL, IDX) \
(TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX))
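/* For example, for the two-level instantiation S<int>::f<double> of
     template <class T> struct S { template <class U> void f (U); };
   the argument vector is { {int}, {double} }: TMPL_ARGS_DEPTH is 2,
   TMPL_ARGS_LEVEL (args, 1) is {int}, and TMPL_ARG (args, 2, 0) is
   `double'. */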
/* Given a single level of template arguments in NODE, return the
number of arguments. */
#define NUM_TMPL_ARGS(NODE) \
(TREE_VEC_LENGTH (NODE))
/* Returns the innermost level of template arguments in ARGS. */
#define INNERMOST_TEMPLATE_ARGS(NODE) \
(get_innermost_template_args ((NODE), 1))
/* The number of levels of template parameters given by NODE. */
#define TMPL_PARMS_DEPTH(NODE) \
((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE)))
/* The TEMPLATE_DECL instantiated or specialized by NODE. This
TEMPLATE_DECL will be the immediate parent, not the most general
template. For example, in:
template <class T> struct S { template <class U> void f(U); }
the FUNCTION_DECL for S<int>::f<double> will have, as its
DECL_TI_TEMPLATE, `template <class U> S<int>::f<U>'.
As a special case, for a member friend template of a template
class, this value will not be a TEMPLATE_DECL, but rather an
IDENTIFIER_NODE or OVERLOAD indicating the name of the template and
any explicit template arguments provided. For example, in:
template <class T> struct S { friend void f<int>(int, double); }
the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the
DECL_TI_ARGS will be {int}. */
#define DECL_TI_TEMPLATE(NODE) TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE))
/* The template arguments used to obtain this decl from the most
general form of DECL_TI_TEMPLATE. For the example given for
DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}. These
are always the full set of arguments required to instantiate this
declaration from the most general template specialized here. */
#define DECL_TI_ARGS(NODE) TI_ARGS (DECL_TEMPLATE_INFO (NODE))
/* The TEMPLATE_DECL associated with NODE, a class type. Even if NODE
will be generated from a partial specialization, the TEMPLATE_DECL
referred to here will be the original template. For example,
given:
template <typename T> struct S {};
template <typename T> struct S<T*> {};
the CLASSTYPE_TI_TEMPLATE for S<int*> will be S, not the S<T*>. */
#define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE))
#define CLASSTYPE_TI_ARGS(NODE) TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE))
/* For a template instantiation TYPE, returns the TYPE corresponding
to the primary template. Otherwise returns TYPE itself. */
#define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE) \
((CLASSTYPE_USE_TEMPLATE ((TYPE)) \
&& !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE))) \
? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE \
(CLASSTYPE_TI_TEMPLATE ((TYPE))))) \
: (TYPE))
/* Like CLASSTYPE_TI_TEMPLATE, but also works for ENUMERAL_TYPEs. */
#define TYPE_TI_TEMPLATE(NODE) \
(TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE)))
/* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE. */
#define TYPE_TI_ARGS(NODE) \
(TI_ARGS (TYPE_TEMPLATE_INFO (NODE)))
#define INNERMOST_TEMPLATE_PARMS(NODE) TREE_VALUE (NODE)
/* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the
sense of [temp.mem]. */
#define DECL_MEMBER_TEMPLATE_P(NODE) \
(DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE)))
/* Nonzero if the NODE corresponds to the template parameters for a
member template, whose inline definition is being processed after
the class definition is complete. */
#define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE)
/* In a FUNCTION_DECL, the saved language-specific per-function data. */
#define DECL_SAVED_FUNCTION_DATA(NODE) \
(DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (NODE)) \
->u.f.u.saved_language_function)
/* Indicates an indirect_expr is for converting a reference. */
#define REFERENCE_REF_P(NODE) \
TREE_LANG_FLAG_0 (INDIRECT_REF_CHECK (NODE))
#define NEW_EXPR_USE_GLOBAL(NODE) \
TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE))
#define DELETE_EXPR_USE_GLOBAL(NODE) \
TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE))
#define DELETE_EXPR_USE_VEC(NODE) \
TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE))
/* Indicates that this is a non-dependent COMPOUND_EXPR which will
resolve to a function call. */
#define COMPOUND_EXPR_OVERLOADED(NODE) \
TREE_LANG_FLAG_0 (COMPOUND_EXPR_CHECK (NODE))
/* In a CALL_EXPR appearing in a template, true if Koenig lookup
should be performed at instantiation time. */
#define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE))
/* Indicates whether a string literal has been parenthesized. Such
usages are disallowed in certain circumstances. */
#define PAREN_STRING_LITERAL_P(NODE) \
TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE))
/* Nonzero if this AGGR_INIT_EXPR provides for initialization via a
constructor call, rather than an ordinary function call. */
#define AGGR_INIT_VIA_CTOR_P(NODE) \
TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE))
/* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a
TEMPLATE_DECL. This macro determines whether or not a given class
type is really a template type, as opposed to an instantiation or
specialization of one. */
#define CLASSTYPE_IS_TEMPLATE(NODE) \
(CLASSTYPE_TEMPLATE_INFO (NODE) \
&& !CLASSTYPE_USE_TEMPLATE (NODE) \
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
/* The name used by the user to name the typename type. Typically,
this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the
corresponding TYPE_DECL. However, this may also be a
TEMPLATE_ID_EXPR if we had something like `typename X::Y<T>'. */
#define TYPENAME_TYPE_FULLNAME(NODE) (TYPENAME_TYPE_CHECK (NODE))->type.values
/* True if a TYPENAME_TYPE was declared as an "enum". */
#define TYPENAME_IS_ENUM_P(NODE) \
(TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE was declared as a "class", "struct", or
"union". */
#define TYPENAME_IS_CLASS_P(NODE) \
(TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE)))
/* Nonzero in INTEGER_CST means that this int is negative by dint of
using a twos-complement negated operand. */
#define TREE_NEGATED_INT(NODE) TREE_LANG_FLAG_0 (INTEGER_CST_CHECK (NODE))
/* [class.virtual]
A class that declares or inherits a virtual function is called a
polymorphic class. */
#define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE))
/* Nonzero if this class has a virtual function table pointer. */
#define TYPE_CONTAINS_VPTR_P(NODE) \
(TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE))
/* This flag is true of a local VAR_DECL if it was declared in a for
statement, but we are no longer in the scope of the for. */
#define DECL_DEAD_FOR_LOCAL(NODE) DECL_LANG_FLAG_7 (VAR_DECL_CHECK (NODE))
/* This flag is set on a VAR_DECL that is a DECL_DEAD_FOR_LOCAL
if we already emitted a warning about using it. */
#define DECL_ERROR_REPORTED(NODE) DECL_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))
/* Nonzero if NODE is a FUNCTION_DECL (for a function with global
scope) declared in a local scope. */
#define DECL_LOCAL_FUNCTION_P(NODE) \
DECL_LANG_FLAG_0 (FUNCTION_DECL_CHECK (NODE))
/* Nonzero if NODE is a DECL which we know about but which has not
been explicitly declared, such as a built-in function or a friend
declared inside a class. In the latter case DECL_HIDDEN_FRIEND_P
will be set. */
#define DECL_ANTICIPATED(NODE) \
(DECL_LANG_SPECIFIC (DECL_COMMON_CHECK (NODE))->decl_flags.anticipated_p)
/* Nonzero if NODE is a FUNCTION_DECL which was declared as a friend
within a class but has not been declared in the surrounding scope.
The function is invisible except via argument dependent lookup. */
#define DECL_HIDDEN_FRIEND_P(NODE) \
(DECL_LANG_SPECIFIC (DECL_COMMON_CHECK (NODE))->decl_flags.hidden_friend_p)
/* Nonzero if DECL has been declared threadprivate by
#pragma omp threadprivate. */
#define CP_DECL_THREADPRIVATE_P(DECL) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->decl_flags.threadprivate_p)
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* Returns nonzero if DECL has external linkage, as specified by the
language standard. (This predicate may hold even when the
corresponding entity is not actually given external linkage in the
object file; see decl_linkage for details.) */
#define DECL_EXTERNAL_LINKAGE_P(DECL) \
(decl_linkage (DECL) == lk_external)
/* Keep these codes in ascending code order. */
#define INTEGRAL_CODE_P(CODE) \
((CODE) == ENUMERAL_TYPE \
|| (CODE) == BOOLEAN_TYPE \
|| (CODE) == INTEGER_TYPE)
/* [basic.fundamental]
Types bool, char, wchar_t, and the signed and unsigned integer types
are collectively called integral types.
Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration
types as well, which is incorrect in C++. Keep these checks in
ascending code order. */
#define CP_INTEGRAL_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == BOOLEAN_TYPE \
|| TREE_CODE (TYPE) == INTEGER_TYPE)
/* Returns true if TYPE is an integral or enumeration type. Keep
these checks in ascending code order. */
#define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE))
/* [basic.fundamental]
Integral and floating types are collectively called arithmetic
types.
As a GNU extension, we also accept complex types.
Keep these checks in ascending code order. */
#define ARITHMETIC_TYPE_P(TYPE) \
(CP_INTEGRAL_TYPE_P (TYPE) \
|| TREE_CODE (TYPE) == REAL_TYPE \
|| TREE_CODE (TYPE) == COMPLEX_TYPE)
/* [basic.types]
Arithmetic types, enumeration types, pointer types, and
pointer-to-member types, are collectively called scalar types.
Keep these checks in ascending code order. */
#define SCALAR_TYPE_P(TYPE) \
(TYPE_PTRMEM_P (TYPE) \
|| TREE_CODE (TYPE) == ENUMERAL_TYPE \
|| ARITHMETIC_TYPE_P (TYPE) \
|| TYPE_PTR_P (TYPE) \
/* APPLE LOCAL blocks 6040305 */ \
|| TREE_CODE (TYPE) == BLOCK_POINTER_TYPE \
|| TYPE_PTRMEMFUNC_P (TYPE))
/* [dcl.init.aggr]
An aggregate is an array or a class with no user-declared
constructors, no private or protected non-static data members, no
base classes, and no virtual functions.
As an extension, we also treat vectors as aggregates. Keep these
checks in ascending code order. */
#define CP_AGGREGATE_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == VECTOR_TYPE \
|| TREE_CODE (TYPE) == ARRAY_TYPE \
|| (CLASS_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE)))
/* Nonzero for a class type means that the class type has a
user-declared constructor. */
#define TYPE_HAS_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE))
/* When appearing in an INDIRECT_REF, it means that the tree structure
underneath is actually a call to a constructor. This is needed
when the constructor must initialize local storage (which can
be automatically destroyed), rather than allowing it to allocate
space from the heap.
When appearing in a SAVE_EXPR, it means that underneath
is a call to a constructor.
When appearing in a CONSTRUCTOR, the expression is a
compound literal.
When appearing in a FIELD_DECL, it means that this field
has been duly initialized in its constructor. */
#define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE))
/* True if NODE is a brace-enclosed initializer. */
#define BRACE_ENCLOSED_INITIALIZER_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && !TREE_TYPE (NODE))
/* True if NODE is a compound-literal, i.e., a brace-enclosed
initializer cast to a particular type. */
#define COMPOUND_LITERAL_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE))
#define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \
&& VEC_empty (constructor_elt, \
CONSTRUCTOR_ELTS (NODE)) \
&& !TREE_HAS_CONSTRUCTOR (NODE))
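/* For example, in `int a[2] = {};' the initializer is a typeless
   CONSTRUCTOR with no elements, so BRACE_ENCLOSED_INITIALIZER_P and
   EMPTY_CONSTRUCTOR_P both hold, whereas a compound literal
   `(T){ ... }' has TREE_HAS_CONSTRUCTOR set and satisfies
   COMPOUND_LITERAL_P instead. */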
/* Nonzero means that an object of this type cannot be initialized using
an initializer list. */
#define CLASSTYPE_NON_AGGREGATE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate)
#define TYPE_NON_AGGREGATE_CLASS(NODE) \
(IS_AGGR_TYPE (NODE) && CLASSTYPE_NON_AGGREGATE (NODE))
/* Nonzero if there is a user-defined X::op=(x&) for this class. */
#define TYPE_HAS_COMPLEX_ASSIGN_REF(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_assign_ref)
#define TYPE_HAS_COMPLEX_INIT_REF(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_init_ref)
/* Nonzero if TYPE has a trivial destructor. From [class.dtor]:
A destructor is trivial if it is an implicitly declared
destructor and if:
- all of the direct base classes of its class have trivial
destructors,
- for all of the non-static data members of its class that are
of class type (or array thereof), each such class has a
trivial destructor. */
#define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \
(!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE))
/* Nonzero for _TYPE node means that this type does not have a trivial
destructor. Therefore, destroying an object of this type will
involve a call to a destructor. This can apply to objects of
ARRAY_TYPE if the type of the elements needs a destructor. */
#define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \
(TYPE_LANG_FLAG_4 (NODE))
/* APPLE LOCAL begin omit calls to empty destructors 5559195 */
/* One if the body of the destructor of class type NODE has been shown to do
nothing, else zero. */
#define CLASSTYPE_HAS_NONTRIVIAL_DESTRUCTOR_BODY(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_nontrivial_destructor_body)
/* One if destructor of this type must be called by its base classes because
one of its base classes' destructors must be called. */
#define CLASSTYPE_DESTRUCTOR_NONTRIVIAL_BECAUSE_OF_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->destructor_nontrivial_because_of_base)
/* One if the values of CLASSTYPE_DESTRUCTOR_NONTRIVIAL_BECAUSE_OF_BASE
and CLASSTYPE_HAS_NONTRIVIAL_DESTRUCTOR_BODY are final. */
#define CLASSTYPE_DESTRUCTOR_TRIVIALITY_FINAL(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->destructor_triviality_final)
/* APPLE LOCAL end omit calls to empty destructors 5559195 */
/* Nonzero for class type means that copy initialization of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_INIT_REF(NODE) \
(TYPE_HAS_INIT_REF (NODE) && ! TYPE_HAS_COMPLEX_INIT_REF (NODE))
/* Nonzero for class type means that assignment of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_ASSIGN_REF(NODE) \
(TYPE_HAS_ASSIGN_REF (NODE) && ! TYPE_HAS_COMPLEX_ASSIGN_REF (NODE))
/* Returns true if NODE is a pointer-to-data-member. */
#define TYPE_PTRMEM_P(NODE) \
(TREE_CODE (NODE) == OFFSET_TYPE)
/* Returns true if NODE is a pointer. */
#define TYPE_PTR_P(NODE) \
(TREE_CODE (NODE) == POINTER_TYPE)
/* Returns true if NODE is an object type:
[basic.types]
An object type is a (possibly cv-qualified) type that is not a
function type, not a reference type, and not a void type.
Keep these checks in ascending order, for speed. */
#define TYPE_OBJ_P(NODE) \
(TREE_CODE (NODE) != REFERENCE_TYPE \
&& TREE_CODE (NODE) != VOID_TYPE \
&& TREE_CODE (NODE) != FUNCTION_TYPE \
&& TREE_CODE (NODE) != METHOD_TYPE)
/* Returns true if NODE is a pointer to an object. Keep these checks
in ascending tree code order. */
#define TYPE_PTROB_P(NODE) \
(TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a reference to an object. Keep these checks
in ascending tree code order. */
#define TYPE_REF_OBJ_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a pointer to an object, or a pointer to
void. Keep these checks in ascending tree code order. */
#define TYPE_PTROBV_P(NODE) \
(TYPE_PTR_P (NODE) \
&& !(TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE \
|| TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE))
/* Returns true if NODE is a pointer to function. */
#define TYPE_PTRFN_P(NODE) \
(TREE_CODE (NODE) == POINTER_TYPE \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Returns true if NODE is a reference to function. */
#define TYPE_REFFN_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Nonzero for _TYPE node means that this type is a pointer to member
function type. */
#define TYPE_PTRMEMFUNC_P(NODE) \
(TREE_CODE (NODE) == RECORD_TYPE \
&& TYPE_LANG_SPECIFIC (NODE) \
&& TYPE_PTRMEMFUNC_FLAG (NODE))
#define TYPE_PTRMEMFUNC_FLAG(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->ptrmemfunc_flag)
/* Returns true if NODE is a pointer-to-member. */
#define TYPE_PTR_TO_MEMBER_P(NODE) \
(TYPE_PTRMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE))
/* Indicates when overload resolution may resolve to a pointer to
member function. [expr.unary.op]/3 */
#define PTRMEM_OK_P(NODE) \
TREE_LANG_FLAG_0 (TREE_CHECK2 ((NODE), ADDR_EXPR, OFFSET_REF))
/* Get the POINTER_TYPE to the METHOD_TYPE associated with this
pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true,
before using this macro. */
#define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \
(TREE_TYPE (TYPE_FIELDS (NODE)))
/* Returns `A' for a type like `int (A::*)(double)' */
#define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \
TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
/* These are used to manipulate the canonical RECORD_TYPE from the
hashed POINTER_TYPE, and can only be used on the POINTER_TYPE. */
#define TYPE_GET_PTRMEMFUNC_TYPE(NODE) \
(TYPE_LANG_SPECIFIC (NODE) ? LANG_TYPE_PTRMEM_CHECK (NODE)->record : NULL)
#define TYPE_SET_PTRMEMFUNC_TYPE(NODE, VALUE) \
do { \
if (TYPE_LANG_SPECIFIC (NODE) == NULL) \
{ \
TYPE_LANG_SPECIFIC (NODE) = GGC_CNEWVAR \
(struct lang_type, sizeof (struct lang_type_ptrmem)); \
TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.h.is_lang_type_class = 0; \
} \
TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.record = (VALUE); \
} while (0)
/* For a pointer-to-member type of the form `T X::*', this is `X'.
For a type like `void (X::*)() const', this type is `X', not `const
X'. To get at the `const X' you have to look at the
TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have
type `const X*'. */
#define TYPE_PTRMEM_CLASS_TYPE(NODE) \
(TYPE_PTRMEM_P (NODE) \
? TYPE_OFFSET_BASETYPE (NODE) \
: TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE))
/* For a pointer-to-member type of the form `T X::*', this is `T'. */
#define TYPE_PTRMEM_POINTED_TO_TYPE(NODE) \
(TYPE_PTRMEM_P (NODE) \
? TREE_TYPE (NODE) \
: TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
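/* For example, given `int X::*p;', the type of `p' is an OFFSET_TYPE
   whose TYPE_PTRMEM_CLASS_TYPE is `X' and whose
   TYPE_PTRMEM_POINTED_TO_TYPE is `int'. Given `void (X::*q)();', the
   type of `q' is the RECORD_TYPE described above, and the same two
   macros yield `X' and the underlying METHOD_TYPE. */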
/* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for
`X'. */
#define PTRMEM_CST_CLASS(NODE) \
TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE)))
/* For a pointer-to-member constant `X::Y' this is the _DECL for
`Y'. */
#define PTRMEM_CST_MEMBER(NODE) (((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member)
/* The expression in question for a TYPEOF_TYPE. */
#define TYPEOF_TYPE_EXPR(NODE) (TYPEOF_TYPE_CHECK (NODE))->type.values
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was
specified in its declaration. This can also be set for an
erroneously declared PARM_DECL. */
#define DECL_THIS_EXTERN(NODE) \
DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was
specified in its declaration. This can also be set for an
erroneously declared PARM_DECL. */
#define DECL_THIS_STATIC(NODE) \
DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a base class
of the parent object, as opposed to a member field. */
#define DECL_FIELD_IS_BASE(NODE) \
DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE))
/* Nonzero if TYPE is an anonymous union or struct type. We have to use a
flag for this because "A union for which objects or pointers are
declared is not an anonymous union" [class.union]. */
#define ANON_AGGR_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr)
#define SET_ANON_AGGR_TYPE_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1)
/* Nonzero if TYPE is an anonymous union type. */
#define ANON_UNION_TYPE_P(NODE) \
(TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE))
#define UNKNOWN_TYPE LANG_TYPE
/* Define fields and accessors for nodes representing declared names. */
#define TYPE_WAS_ANONYMOUS(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->was_anonymous)
/* C++: all of these are overloaded! These apply only to TYPE_DECLs. */
/* The format of each node in the DECL_FRIENDLIST is as follows:
The TREE_PURPOSE will be the name of a function, i.e., an
IDENTIFIER_NODE. The TREE_VALUE will be itself a TREE_LIST, whose
TREE_VALUEs are friends with the given name. */
#define DECL_FRIENDLIST(NODE) (DECL_INITIAL (NODE))
#define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST))
#define FRIEND_DECLS(LIST) (TREE_VALUE (LIST))
/* The DECL_ACCESS, if non-NULL, is a TREE_LIST. The TREE_PURPOSE of
each node is a type; the TREE_VALUE is the access granted for this
DECL in that type. The DECL_ACCESS is set by access declarations.
For example, if a member that would normally be public in a
derived class is made protected, then the derived class and the
protected_access_node will appear in the DECL_ACCESS for the node. */
#define DECL_ACCESS(NODE) (LANG_DECL_U2_CHECK (NODE, 0)->access)
/* Nonzero if the FUNCTION_DECL is a global constructor. */
#define DECL_GLOBAL_CTOR_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->decl_flags.global_ctor_p)
/* Nonzero if the FUNCTION_DECL is a global destructor. */
#define DECL_GLOBAL_DTOR_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->decl_flags.global_dtor_p)
/* Accessor macros for C++ template decl nodes. */
/* The DECL_TEMPLATE_PARMS are a list. The TREE_PURPOSE of each node
is an INT_CST whose TREE_INT_CST_LOW indicates the level of the
template parameters, with 1 being the outermost set of template
parameters. The TREE_VALUE is a vector, whose elements are the
template parameters at each level. Each element in the vector is a
TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a
non-type parameter), or a TYPE_DECL (if the parameter is a type
parameter). The TREE_PURPOSE is the default value, if any. The
TEMPLATE_PARM_INDEX for the parameter is available as the
DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a
TYPE_DECL). */
#define DECL_TEMPLATE_PARMS(NODE) DECL_NON_COMMON_CHECK (NODE)->decl_non_common.arguments
#define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \
INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE))
#define DECL_NTPARMS(NODE) \
TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE))
/* For function, method, class-data templates. */
#define DECL_TEMPLATE_RESULT(NODE) DECL_RESULT_FLD (NODE)
/* For a static member variable template, the
DECL_TEMPLATE_INSTANTIATIONS list contains the explicitly and
implicitly generated instantiations of the variable. There are no
partial instantiations of static member variables, so all of these
will be full instantiations.
For a class template the DECL_TEMPLATE_INSTANTIATIONS lists holds
all instantiations and specializations of the class type, including
partial instantiations and partial specializations.
In both cases, the TREE_PURPOSE of each node contains the arguments
used; the TREE_VALUE contains the generated variable. The template
arguments are always complete. For example, given:
template <class T> struct S1 {
template <class U> struct S2 {};
template <class U> struct S2<U*> {};
};
the record for the partial specialization will contain, as its
argument list, { {T}, {U*} }, and will be on the
DECL_TEMPLATE_INSTANTIATIONS list for `template <class T> template
<class U> struct S1<T>::S2'.
This list is not used for function templates. */
#define DECL_TEMPLATE_INSTANTIATIONS(NODE) DECL_VINDEX (NODE)
/* For a function template, the DECL_TEMPLATE_SPECIALIZATIONS lists
contains all instantiations and specializations of the function,
including partial instantiations. For a partial instantiation
which is a specialization, this list holds only full
specializations of the template that are instantiations of the
partial instantiation. For example, given:
template <class T> struct S {
template <class U> void f(U);
template <> void f(T);
};
the `S<int>::f<int>(int)' function will appear on the
DECL_TEMPLATE_SPECIALIZATIONS list for both `template <class T>
template <class U> void S<T>::f(U)' and `template <class T> void
S<int>::f(T)'. In the latter case, however, it will have only the
innermost set of arguments (T, in this case). The DECL_TI_TEMPLATE
for the function declaration will point at the specialization, not
the fully general template.
For a class template, this list contains the partial
specializations of this template. (Full specializations are not
recorded on this list.) The TREE_PURPOSE holds the arguments used
in the partial specialization (e.g., for `template <class T> struct
S<T*, int>' this will be `T*'.) The arguments will also include
any outer template arguments. The TREE_VALUE holds the innermost
template parameters for the specialization (e.g., `T' in the
example above.) The TREE_TYPE is the _TYPE node for the partial
specialization.
This list is not used for static variable templates. */
#define DECL_TEMPLATE_SPECIALIZATIONS(NODE) DECL_SIZE (NODE)
/* Nonzero for a DECL which is actually a template parameter. Keep
these checks in ascending tree code order. */
#define DECL_TEMPLATE_PARM_P(NODE) \
(DECL_LANG_FLAG_0 (NODE) \
&& (TREE_CODE (NODE) == CONST_DECL \
|| TREE_CODE (NODE) == PARM_DECL \
|| TREE_CODE (NODE) == TYPE_DECL \
|| TREE_CODE (NODE) == TEMPLATE_DECL))
/* Mark NODE as a template parameter. */
#define SET_DECL_TEMPLATE_PARM_P(NODE) \
(DECL_LANG_FLAG_0 (NODE) = 1)
/* Nonzero if NODE is a template template parameter. */
#define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE))
/* Nonzero if NODE is a TEMPLATE_DECL representing an
UNBOUND_CLASS_TEMPLATE tree node. */
#define DECL_UNBOUND_CLASS_TEMPLATE_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL && !DECL_TEMPLATE_RESULT (NODE))
#define DECL_FUNCTION_TEMPLATE_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL \
&& !DECL_UNBOUND_CLASS_TEMPLATE_P (NODE) \
&& TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL)
/* Nonzero for a DECL that represents a template class. */
#define DECL_CLASS_TEMPLATE_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL \
&& !DECL_UNBOUND_CLASS_TEMPLATE_P (NODE) \
&& TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL \
&& !DECL_TEMPLATE_TEMPLATE_PARM_P (NODE))
/* Nonzero if NODE declares a type. */
#define DECL_DECLARES_TYPE_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL || DECL_CLASS_TEMPLATE_P (NODE))
/* Nonzero if NODE is the typedef implicitly generated for a type when
the type is declared. In C++, `struct S {};' is roughly
equivalent to `struct S {}; typedef struct S S;' in C.
DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this
example. In C++, there is a second implicit typedef for each
class, in the scope of `S' itself, so that you can say `S::S'.
DECL_SELF_REFERENCE_P will hold for that second typedef. */
#define DECL_IMPLICIT_TYPEDEF_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE))
#define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \
(DECL_LANG_FLAG_2 (NODE) = 1)
#define DECL_SELF_REFERENCE_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE))
#define SET_DECL_SELF_REFERENCE_P(NODE) \
(DECL_LANG_FLAG_4 (NODE) = 1)
/* A `primary' template is one that has its own template header. A
member function of a class template is a template, but not primary.
A member template is primary. Friend templates are primary, too. */
/* Returns the primary template corresponding to these parameters. */
#define DECL_PRIMARY_TEMPLATE(NODE) \
(TREE_TYPE (DECL_INNERMOST_TEMPLATE_PARMS (NODE)))
/* Returns nonzero if NODE is a primary template. */
#define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE))
/* Non-zero iff NODE is a specialization of a template. The value
indicates the type of specializations:
1=implicit instantiation
2=partial or explicit specialization, e.g.:
template <> int min<int> (int, int),
3=explicit instantiation, e.g.:
template int min<int> (int, int);
Note that NODE will be marked as a specialization even if the
template it is instantiating is not a primary template. For
example, given:
template <typename T> struct O {
void f();
struct I {};
};
both O<int>::f and O<int>::I will be marked as instantiations.
If DECL_USE_TEMPLATE is non-zero, then DECL_TEMPLATE_INFO will also
be non-NULL. */
#define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->decl_flags.use_template)
/* Like DECL_USE_TEMPLATE, but for class types. */
#define CLASSTYPE_USE_TEMPLATE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->use_template)
/* True if NODE is a specialization of a primary template. */
#define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE) \
(CLASS_TYPE_P (NODE) \
&& CLASSTYPE_USE_TEMPLATE (NODE) \
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
#define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1)
#define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) & 1)
#define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2)
#define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2)
/* Returns true for an explicit or partial specialization of a class
template. */
#define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 2)
#define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 2)
#define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1)
#define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1)
#define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 1)
#define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 1)
#define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3)
#define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3)
#define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 3)
#define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 3)
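/* A sketch of the encoding above:
     template <class T> struct A {};
     A<int> a;                       (implicit instantiation, 1)
     template <> struct A<char> {};  (template specialization, 2)
     template struct A<long>;        (explicit instantiation, 3) */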
/* Nonzero if DECL is a friend function which is an instantiation
from the point of view of the compiler, but not from the point of
view of the language. For example given:
template <class T> struct S { friend void f(T) {}; };
the declaration of `void f(int)' generated when S<int> is
instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be
a DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION. */
#define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \
(DECL_TEMPLATE_INFO (DECL) && !DECL_USE_TEMPLATE (DECL))
/* Nonzero iff we are currently processing a declaration for an
entity with its own template parameter list, and which is not a
full specialization. */
#define PROCESSING_REAL_TEMPLATE_DECL_P() \
(processing_template_decl > template_class_depth (current_scope ()))
/* Nonzero if this VAR_DECL or FUNCTION_DECL has already been
instantiated, i.e. its definition has been generated from the
pattern given in the template. */
#define DECL_TEMPLATE_INSTANTIATED(NODE) \
DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE))
/* We know what we're doing with this decl now. */
#define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE)
/* DECL_EXTERNAL must be set on a decl until the decl is actually emitted,
so that assemble_external will work properly. So we have this flag to
tell us whether the decl is really not external. */
#define DECL_NOT_REALLY_EXTERN(NODE) \
(DECL_LANG_SPECIFIC (NODE)->decl_flags.not_really_extern)
#define DECL_REALLY_EXTERN(NODE) \
(DECL_EXTERNAL (NODE) && ! DECL_NOT_REALLY_EXTERN (NODE))
/* A thunk is a stub function.
A thunk is an alternate entry point for an ordinary FUNCTION_DECL.
The address of the ordinary FUNCTION_DECL is given by the
DECL_INITIAL, which is always an ADDR_EXPR whose operand is a
FUNCTION_DECL. The job of the thunk is to either adjust the this
pointer before transferring control to the FUNCTION_DECL, or call
FUNCTION_DECL and then adjust the result value. Note, the result
pointer adjusting thunk must perform a call to the thunked
function, (or be implemented via passing some invisible parameter
to the thunked function, which is modified to perform the
adjustment just before returning).
A thunk may perform either, or both, of the following operations:
o Adjust the this or result pointer by a constant offset.
o Adjust the this or result pointer by looking up a vcall or vbase offset
in the vtable.
A this pointer adjusting thunk converts from a base to a derived
class, and hence adds the offsets. A result pointer adjusting thunk
converts from a derived class to a base, and hence subtracts the
offsets. If both operations are performed, then the constant
adjustment is performed first for this pointer adjustment and last
for the result pointer adjustment.
The constant adjustment is given by THUNK_FIXED_OFFSET. If the
vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is
used. For this pointer adjusting thunks, it is the vcall offset
into the vtable. For result pointer adjusting thunks it is the
binfo of the virtual base to convert to. Use that binfo's vbase
offset.
It is possible to have equivalent covariant thunks. These are
distinct virtual covariant thunks whose vbase offsets happen to
have the same value. THUNK_ALIAS is used to pick one as the
canonical thunk, which will get all the this pointer adjusting
thunks attached to it. */
/* An integer indicating how many bytes should be subtracted from the
this or result pointer when this function is called. */
#define THUNK_FIXED_OFFSET(DECL) \
(DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.f.u5.fixed_offset)
/* A tree indicating how to perform the virtual adjustment. For a this
adjusting thunk it is the number of bytes to be added to the vtable
to find the vcall offset. For a result adjusting thunk, it is the
binfo of the relevant virtual base. If NULL, then there is no
virtual adjust. (The vptr is always located at offset zero from
the this or result pointer.) (If the covariant type is within the
class hierarchy being laid out, the vbase index is not yet known
at the point we need to create the thunks, hence the need to use
binfos.) */
#define THUNK_VIRTUAL_OFFSET(DECL) \
(LANG_DECL_U2_CHECK (FUNCTION_DECL_CHECK (DECL), 0)->access)
/* A thunk which is equivalent to another thunk. */
#define THUNK_ALIAS(DECL) \
(DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->decl_flags.u.template_info)
/* For thunk NODE, this is the FUNCTION_DECL thunked to. It is
possible for the target to be a thunk too. */
#define THUNK_TARGET(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.f.befriending_classes)
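/* Illustrative sketch, not part of this header: conceptually, a this
   pointer adjusting thunk built from the fields above performs

       this += THUNK_FIXED_OFFSET (thunk);
       if (THUNK_VIRTUAL_OFFSET (thunk))
         this += vcall offset read from the vtable at
                 THUNK_VIRTUAL_OFFSET (thunk) bytes;
       tail call THUNK_TARGET (thunk);

   with the constant step first, as described above; a result pointer
   adjusting thunk calls the target first and applies the constant
   step last.  */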
/* True for a SCOPE_REF iff the "template" keyword was used to
indicate that the qualified name denotes a template. */
#define QUALIFIED_NAME_IS_TEMPLATE(NODE) \
(TREE_LANG_FLAG_0 (SCOPE_REF_CHECK (NODE)))
/* True for an OMP_ATOMIC that has dependent parameters. These are stored
as bare LHS/RHS, and not as ADDR/RHS, as in the generic statement. */
#define OMP_ATOMIC_DEPENDENT_P(NODE) \
(TREE_LANG_FLAG_0 (OMP_ATOMIC_CHECK (NODE)))
/* Used to store the operation code when OMP_ATOMIC_DEPENDENT_P is set. */
#define OMP_ATOMIC_CODE(NODE) \
(OMP_ATOMIC_CHECK (NODE)->exp.complexity)
/* Used while gimplifying continue statements bound to OMP_FOR nodes. */
#define OMP_FOR_GIMPLIFYING_P(NODE) \
(TREE_LANG_FLAG_0 (OMP_FOR_CHECK (NODE)))
/* A language-specific token attached to the OpenMP data clauses to
hold code (or code fragments) related to ctors, dtors, and op=.
See semantics.c for details. */
#define CP_OMP_CLAUSE_INFO(NODE) \
TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \
OMP_CLAUSE_COPYPRIVATE))
/* These macros provide convenient access to the various _STMT nodes
created when parsing template declarations. */
#define TRY_STMTS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0)
#define TRY_HANDLERS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1)
#define EH_SPEC_STMTS(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0)
#define EH_SPEC_RAISES(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1)
#define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0)
/* Nonzero if this try block is a function try block. */
#define FN_TRY_BLOCK_P(NODE) TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE))
#define HANDLER_PARMS(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 0)
#define HANDLER_BODY(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 1)
#define HANDLER_TYPE(NODE) TREE_TYPE (HANDLER_CHECK (NODE))
/* CLEANUP_STMT accessors. The statement(s) covered, the cleanup to run
and the VAR_DECL for which this cleanup exists. */
#define CLEANUP_BODY(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0)
#define CLEANUP_EXPR(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1)
#define CLEANUP_DECL(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2)
/* IF_STMT accessors. These give access to the condition of the if
statement, the then block of the if statement, and the else block
of the if statement if it exists. */
#define IF_COND(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 0)
#define THEN_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 1)
#define ELSE_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 2)
/* WHILE_STMT accessors. These give access to the condition of the
while statement and the body of the while statement, respectively. */
#define WHILE_COND(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0)
#define WHILE_BODY(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1)
/* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
#define WHILE_ATTRIBUTES(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 2)
/* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
/* DO_STMT accessors. These give access to the condition of the do
statement and the body of the do statement, respectively. */
#define DO_COND(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 0)
#define DO_BODY(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 1)
/* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
#define DO_ATTRIBUTES(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 2)
/* APPLE LOCAL begin C* language */
/* Used as a flag to indicate synthesized inner do-while loop of a
foreach statement. Used for generation of break/continue statement
of the loop. */
#define DO_FOREACH(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 3)
/* APPLE LOCAL end C* language */
/* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
/* FOR_STMT accessors. These give access to the init statement,
condition, update expression, and body of the for statement,
respectively. */
#define FOR_INIT_STMT(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 0)
#define FOR_COND(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 1)
#define FOR_EXPR(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 2)
#define FOR_BODY(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 3)
/* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
#define FOR_ATTRIBUTES(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 4)
/* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
#define SWITCH_STMT_COND(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0)
#define SWITCH_STMT_BODY(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1)
#define SWITCH_STMT_TYPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2)
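#if 0
/* Illustrative sketch, not part of this header: a hypothetical
   statement walker would use the accessors above instead of raw
   TREE_OPERAND indices (walk_stmt below is assumed, not real):  */
static void
walk_if_stmt (tree stmt)
{
  walk_stmt (IF_COND (stmt));
  walk_stmt (THEN_CLAUSE (stmt));
  if (ELSE_CLAUSE (stmt))
    walk_stmt (ELSE_CLAUSE (stmt));
}
#endif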
/* STMT_EXPR accessor. */
#define STMT_EXPR_STMT(NODE) TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0)
/* EXPR_STMT accessor. This gives the expression associated with an
expression statement. */
#define EXPR_STMT_EXPR(NODE) TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0)
/* True if this TARGET_EXPR was created by build_cplus_new, and so we can
discard it if it isn't useful. */
#define TARGET_EXPR_IMPLICIT_P(NODE) \
TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE))
/* An enumeration of the kind of tags that C++ accepts. */
enum tag_types {
none_type = 0, /* Not a tag type. */
record_type, /* "struct" types. */
class_type, /* "class" types. */
union_type, /* "union" types. */
enum_type, /* "enum" types. */
typename_type /* "typename" types. */
};
/* The various kinds of lvalues we distinguish. */
typedef enum cp_lvalue_kind {
clk_none = 0, /* Things that are not an lvalue. */
clk_ordinary = 1, /* An ordinary lvalue. */
clk_class = 2, /* An rvalue of class-type. */
clk_bitfield = 4, /* An lvalue for a bit-field. */
clk_packed = 8 /* An lvalue for a packed field. */
} cp_lvalue_kind;
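/* Illustrative sketch, not part of this header: real_lvalue_p
   (declared below, in tree.c) returns a cp_lvalue_kind; since the
   non-zero kinds are distinct bits, a particular flavor can be
   tested with a mask, e.g.

       if (real_lvalue_p (expr) & clk_bitfield)
         the expression designates a bit-field;
*/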
/* Various kinds of template specialization, instantiation, etc. */
typedef enum tmpl_spec_kind {
tsk_none, /* Not a template at all. */
tsk_invalid_member_spec, /* An explicit member template
specialization, but the enclosing
classes have not all been explicitly
specialized. */
tsk_invalid_expl_inst, /* An explicit instantiation containing
template parameter lists. */
tsk_excessive_parms, /* A template declaration with too many
template parameter lists. */
tsk_insufficient_parms, /* A template declaration with too few
parameter lists. */
tsk_template, /* A template declaration. */
tsk_expl_spec, /* An explicit specialization. */
tsk_expl_inst /* An explicit instantiation. */
} tmpl_spec_kind;
/* The various kinds of access. BINFO_ACCESS depends on these being
two-bit quantities. The numerical values are important; they are
used to initialize RTTI data structures, so changing them changes
the ABI. */
typedef enum access_kind {
ak_none = 0, /* Inaccessible. */
ak_public = 1, /* Accessible, as a `public' thing. */
ak_protected = 2, /* Accessible, as a `protected' thing. */
ak_private = 3 /* Accessible, as a `private' thing. */
} access_kind;
/* The various kinds of special functions. If you add to this list,
you should update special_function_p as well. */
typedef enum special_function_kind {
sfk_none = 0, /* Not a special function. This enumeral
must have value zero; see
special_function_p. */
sfk_constructor, /* A constructor. */
sfk_copy_constructor, /* A copy constructor. */
sfk_assignment_operator, /* An assignment operator. */
sfk_destructor, /* A destructor. */
sfk_complete_destructor, /* A destructor for complete objects. */
sfk_base_destructor, /* A destructor for base subobjects. */
sfk_deleting_destructor, /* A destructor for complete objects that
deletes the object after it has been
destroyed. */
sfk_conversion /* A conversion operator. */
} special_function_kind;
/* The various kinds of linkage. From [basic.link],
A name is said to have linkage when it might denote the same
object, reference, function, type, template, namespace or value
as a name introduced in another scope:
-- When a name has external linkage, the entity it denotes can
be referred to from scopes of other translation units or from
other scopes of the same translation unit.
-- When a name has internal linkage, the entity it denotes can
be referred to by names from other scopes in the same
translation unit.
-- When a name has no linkage, the entity it denotes cannot be
referred to by names from other scopes. */
typedef enum linkage_kind {
lk_none, /* No linkage. */
lk_internal, /* Internal linkage. */
lk_external /* External linkage. */
} linkage_kind;
/* Bitmask flags to control type substitution. */
typedef enum tsubst_flags_t {
tf_none = 0, /* nothing special */
tf_error = 1 << 0, /* give error messages */
tf_warning = 1 << 1, /* give warnings too */
tf_ignore_bad_quals = 1 << 2, /* ignore bad cvr qualifiers */
tf_keep_type_decl = 1 << 3, /* retain typedef type decls
(make_typename_type use) */
tf_ptrmem_ok = 1 << 4, /* pointers to member ok (internal
instantiate_type use) */
tf_user = 1 << 5, /* found template must be a user template
(lookup_template_class use) */
tf_conv = 1 << 6, /* We are determining what kind of
conversion might be permissible,
not actually performing the
conversion. */
/* Convenient substitution flags combinations. */
tf_warning_or_error = tf_warning | tf_error
} tsubst_flags_t;
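/* Illustrative sketch, not part of this header: substitution entry
   points take one of these values as a `complain' argument, e.g.

       instantiate_type (lhstype, rhs, tf_warning_or_error);

   requests full diagnostics, while passing tf_none instead makes the
   same operation fail silently (useful for speculative checks).  */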
/* The kind of checking we can do looking in a class hierarchy. */
typedef enum base_access {
ba_any = 0, /* Do not check access, allow an ambiguous base,
prefer a non-virtual base */
ba_unique = 1 << 0, /* Must be a unique base. */
ba_check_bit = 1 << 1, /* Check access. */
ba_check = ba_unique | ba_check_bit,
ba_ignore_scope = 1 << 2, /* Ignore access allowed by local scope. */
ba_quiet = 1 << 3 /* Do not issue error messages. */
} base_access;
/* The various kinds of access check during parsing. */
typedef enum deferring_kind {
dk_no_deferred = 0, /* Check access immediately */
dk_deferred = 1, /* Deferred check */
dk_no_check = 2 /* No access check */
} deferring_kind;
/* The kind of base we can find, looking in a class hierarchy.
Values <0 indicate we failed. */
typedef enum base_kind {
bk_inaccessible = -3, /* The base is inaccessible */
bk_ambig = -2, /* The base is ambiguous */
bk_not_base = -1, /* It is not a base */
bk_same_type = 0, /* It is the same type */
bk_proper_base = 1, /* It is a proper base */
bk_via_virtual = 2 /* It is a proper base, but via a virtual
path. This might not be the canonical
binfo. */
} base_kind;
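#if 0
/* Illustrative sketch, not part of this header: base_access controls
   a base-class search and base_kind reports its outcome, e.g. via
   lookup_base (declared below, in search.c):  */
base_kind bk;
tree binfo = lookup_base (derived_type, base_type, ba_check, &bk);
if (bk == bk_via_virtual)
  ; /* The base is reached along a virtual path.  */
#endif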
/* Node for "pointer to (virtual) function".
This may be distinct from ptr_type_node so gdb can distinguish them. */
#define vfunc_ptr_type_node vtable_entry_type
/* For building calls to `delete'. */
extern GTY(()) tree integer_two_node;
extern GTY(()) tree integer_three_node;
/* The number of function bodies which we are currently processing.
(Zero if we are at namespace scope, one inside the body of a
function, two inside the body of a function in a local class, etc.) */
extern int function_depth;
/* in pt.c */
/* These values are used for the `STRICT' parameter to type_unification and
fn_type_unification. Their meanings are described with the
documentation for fn_type_unification. */
typedef enum unification_kind_t {
DEDUCE_CALL,
DEDUCE_CONV,
DEDUCE_EXACT
} unification_kind_t;
/* Macros for operating on a template instantiation level node. */
#define TINST_DECL(NODE) \
(((tinst_level_t) TINST_LEVEL_CHECK (NODE))->decl)
#define TINST_LOCATION(NODE) \
(((tinst_level_t) TINST_LEVEL_CHECK (NODE))->locus)
#define TINST_IN_SYSTEM_HEADER_P(NODE) \
(((tinst_level_t) TINST_LEVEL_CHECK (NODE))->in_system_header_p)
/* in class.c */
extern int current_class_depth;
/* An array of all local classes present in this translation unit, in
declaration order. */
extern GTY(()) VEC(tree,gc) *local_classes;
/* Here's where we control how name mangling takes place. */
/* Cannot use '$' up front, because this confuses gdb
(names beginning with '$' are gdb-local identifiers).
Note that all forms in which the '$' is significant are long enough
for direct indexing (meaning that if we know there is a '$'
at a particular location, we can index into the string at
any other location that provides distinguishing characters). */
/* Define NO_DOLLAR_IN_LABEL in your favorite tm file if your assembler
doesn't allow '$' in symbol names. */
#ifndef NO_DOLLAR_IN_LABEL
#define JOINER '$'
#define AUTO_TEMP_NAME "_$tmp_"
#define VFIELD_BASE "$vf"
#define VFIELD_NAME "_vptr$"
#define VFIELD_NAME_FORMAT "_vptr$%s"
#define ANON_AGGRNAME_FORMAT "$_%d"
#else /* NO_DOLLAR_IN_LABEL */
#ifndef NO_DOT_IN_LABEL
#define JOINER '.'
#define AUTO_TEMP_NAME "_.tmp_"
#define VFIELD_BASE ".vf"
#define VFIELD_NAME "_vptr."
#define VFIELD_NAME_FORMAT "_vptr.%s"
#define ANON_AGGRNAME_FORMAT "._%d"
#else /* NO_DOT_IN_LABEL */
#define IN_CHARGE_NAME "__in_chrg"
#define AUTO_TEMP_NAME "__tmp_"
#define TEMP_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, \
sizeof (AUTO_TEMP_NAME) - 1))
#define VTABLE_NAME "__vt_"
#define VTABLE_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \
sizeof (VTABLE_NAME) - 1))
#define VFIELD_BASE "__vfb"
#define VFIELD_NAME "__vptr_"
#define VFIELD_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \
sizeof (VFIELD_NAME) - 1))
#define VFIELD_NAME_FORMAT "__vptr_%s"
#define ANON_AGGRNAME_PREFIX "__anon_"
#define ANON_AGGRNAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), ANON_AGGRNAME_PREFIX, \
sizeof (ANON_AGGRNAME_PREFIX) - 1))
#define ANON_AGGRNAME_FORMAT "__anon_%d"
#endif /* NO_DOT_IN_LABEL */
#endif /* NO_DOLLAR_IN_LABEL */
#define THIS_NAME "this"
#define IN_CHARGE_NAME "__in_chrg"
#define VTBL_PTR_TYPE "__vtbl_ptr_type"
#define VTABLE_DELTA_NAME "__delta"
#define VTABLE_PFN_NAME "__pfn"
#if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL)
#define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \
&& IDENTIFIER_POINTER (ID_NODE)[2] == 't' \
&& IDENTIFIER_POINTER (ID_NODE)[3] == JOINER)
#define TEMP_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, sizeof (AUTO_TEMP_NAME)-1))
#define VFIELD_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1))
/* For anonymous aggregate types, we need some sort of name to
hold on to. In practice, this should not appear, but it should
not be harmful if it does. */
#define ANON_AGGRNAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[0] == JOINER \
&& IDENTIFIER_POINTER (ID_NODE)[1] == '_')
#endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */
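/* Illustrative sketch, not part of this header: whichever branch was
   selected above, the *_NAME_P predicates classify identifiers built
   with these schemes, e.g.

       if (TEMP_NAME_P (id))        compiler-generated temporary
       if (VTABLE_NAME_P (id))      vtable name
       if (ANON_AGGRNAME_P (id))    anonymous aggregate type name
*/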
/* Nonzero if we're done parsing and into end-of-file activities. */
extern int at_eof;
/* A list of namespace-scope objects which have constructors or
destructors which reside in the global scope. The decl is stored
in the TREE_VALUE slot and the initializer is stored in the
TREE_PURPOSE slot. */
extern GTY(()) tree static_aggregates;
/* Functions called along with real static constructors and destructors. */
extern GTY(()) tree static_ctors;
extern GTY(()) tree static_dtors;
enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, OP_FLAG, TYPENAME_FLAG };
/* These are used as bits in flags passed to various functions to
control their behavior. Despite the LOOKUP_ prefix, many of these
do not control name lookup. ??? Functions using these flags should
probably be modified to accept explicit boolean flags for the
behaviors relevant to them. */
/* Check for access violations. */
#define LOOKUP_PROTECT (1 << 0)
/* Complain if no suitable member function matching the arguments is
found. */
#define LOOKUP_COMPLAIN (1 << 1)
#define LOOKUP_NORMAL (LOOKUP_PROTECT | LOOKUP_COMPLAIN)
/* Even if the function found by lookup is a virtual function, it
should be called directly. */
#define LOOKUP_NONVIRTUAL (1 << 2)
/* Non-converting (i.e., "explicit") constructors are not tried. */
#define LOOKUP_ONLYCONVERTING (1 << 3)
/* If a temporary is created, it should be created so that it lives
as long as the current variable bindings; otherwise it only lives
until the end of the complete-expression. It also forces
direct-initialization in cases where other parts of the compiler
have already generated a temporary, such as reference
initialization and the catch parameter. */
#define DIRECT_BIND (1 << 4)
/* User-defined conversions are not permitted. (Built-in conversions
are permitted.) */
#define LOOKUP_NO_CONVERSION (1 << 5)
/* The user has explicitly called a destructor. (Therefore, we do
not need to check that the object is non-NULL before calling the
destructor.) */
#define LOOKUP_DESTRUCTOR (1 << 6)
/* Do not permit references to bind to temporaries. */
#define LOOKUP_NO_TEMP_BIND (1 << 7)
/* Do not accept objects, and possibly namespaces. */
#define LOOKUP_PREFER_TYPES (1 << 8)
/* Do not accept objects, and possibly types. */
#define LOOKUP_PREFER_NAMESPACES (1 << 9)
/* Accept types or namespaces. */
#define LOOKUP_PREFER_BOTH (LOOKUP_PREFER_TYPES | LOOKUP_PREFER_NAMESPACES)
/* We are checking that a constructor can be called -- but we do not
actually plan to call it. */
#define LOOKUP_CONSTRUCTOR_CALLABLE (1 << 10)
/* Return friend declarations and un-declared builtin functions.
(Normally, these entities are registered in the symbol table, but
not found by lookup.) */
#define LOOKUP_HIDDEN (LOOKUP_CONSTRUCTOR_CALLABLE << 1)
#define LOOKUP_NAMESPACES_ONLY(F) \
(((F) & LOOKUP_PREFER_NAMESPACES) && !((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_TYPES_ONLY(F) \
(!((F) & LOOKUP_PREFER_NAMESPACES) && ((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_QUALIFIERS_ONLY(F) ((F) & LOOKUP_PREFER_BOTH)
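/* Illustrative sketch, not part of this header: the flags are OR'ed
   together at call sites, e.g. a checked, deliberately non-virtual
   member call might be built as

       build_new_method_call (instance, fns, args, NULL_TREE,
                              LOOKUP_NORMAL | LOOKUP_NONVIRTUAL, NULL);

   (build_new_method_call is declared below, in call.c).  */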
/* These flags are used by the conversion code.
CONV_IMPLICIT : Perform implicit conversions (standard and user-defined).
CONV_STATIC : Perform the explicit conversions for static_cast.
CONV_CONST : Perform the explicit conversions for const_cast.
CONV_REINTERPRET: Perform the explicit conversions for reinterpret_cast.
CONV_PRIVATE : Perform upcasts to private bases.
CONV_FORCE_TEMP : Require a new temporary when converting to the same
aggregate type. */
#define CONV_IMPLICIT 1
#define CONV_STATIC 2
#define CONV_CONST 4
#define CONV_REINTERPRET 8
#define CONV_PRIVATE 16
/* #define CONV_NONCONVERTING 32 */
#define CONV_FORCE_TEMP 64
#define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
| CONV_REINTERPRET)
#define CONV_C_CAST (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
| CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP)
/* Used by build_expr_type_conversion to indicate which types are
acceptable as arguments to the expression under consideration. */
#define WANT_INT 1 /* integer types, including bool */
#define WANT_FLOAT 2 /* floating point types */
#define WANT_ENUM 4 /* enumerated types */
#define WANT_POINTER 8 /* pointer types */
#define WANT_NULL 16 /* null pointer constant */
#define WANT_VECTOR 32 /* vector types */
#define WANT_ARITH (WANT_INT | WANT_FLOAT | WANT_VECTOR)
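/* Illustrative sketch, not part of this header: e.g. requiring an
   expression to convert to some arithmetic or enumerated type:

       expr = build_expr_type_conversion (WANT_ARITH | WANT_ENUM,
                                          expr, true);

   (build_expr_type_conversion is declared below, in cvt.c.)  */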
/* Used with comptypes, and related functions, to guide type
comparison. */
#define COMPARE_STRICT 0 /* Just check if the types are the
same. */
#define COMPARE_BASE 1 /* Check to see if the second type is
derived from the first. */
#define COMPARE_DERIVED 2 /* Like COMPARE_BASE, but in
reverse. */
#define COMPARE_REDECLARATION 4 /* The comparison is being done when
another declaration of an existing
entity is seen. */
/* Used with push_overloaded_decl. */
#define PUSH_GLOBAL 0 /* Push the DECL into namespace scope,
regardless of the current scope. */
#define PUSH_LOCAL 1 /* Push the DECL into the current
scope. */
#define PUSH_USING 2 /* We are pushing this DECL as the
result of a using declaration. */
/* Used with start_function. */
#define SF_DEFAULT 0 /* No flags. */
#define SF_PRE_PARSED 1 /* The function declaration has
already been parsed. */
#define SF_INCLASS_INLINE 2 /* The function is an inline, defined
in the class body. */
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2
is derived from TYPE1, or if TYPE2 is a pointer (reference) to a
class derived from the type pointed to (referred to) by TYPE1. */
#define same_or_base_type_p(TYPE1, TYPE2) \
comptypes ((TYPE1), (TYPE2), COMPARE_BASE)
/* These macros are used to access a TEMPLATE_PARM_INDEX. */
#define TEMPLATE_PARM_INDEX_CAST(NODE) \
((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE))
#define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index)
#define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level)
#define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE))
#define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level)
#define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl)
/* These macros are for accessing the fields of TEMPLATE_TYPE_PARM,
TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes. */
#define TEMPLATE_TYPE_PARM_INDEX(NODE) \
(TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM, TEMPLATE_TEMPLATE_PARM, \
BOUND_TEMPLATE_TEMPLATE_PARM))->type.values
#define TEMPLATE_TYPE_IDX(NODE) \
(TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_LEVEL(NODE) \
(TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \
(TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_DECL(NODE) \
(TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
/* These constants can be used as bit flags in the process of tree formatting.
TFF_PLAIN_IDENTIFIER: unqualified part of a name.
TFF_SCOPE: include the class and namespace scope of the name.
TFF_CHASE_TYPEDEF: print the original type-id instead of the typedef-name.
TFF_DECL_SPECIFIERS: print decl-specifiers.
TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name) with
a class-key (resp. `enum').
TFF_RETURN_TYPE: include function return type.
TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter values.
TFF_EXCEPTION_SPECIFICATION: show function exception specification.
TFF_TEMPLATE_HEADER: show the template<...> header in a
template-declaration.
TFF_TEMPLATE_NAME: show only template-name.
TFF_EXPR_IN_PARENS: parenthesize expressions.
TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments. */
#define TFF_PLAIN_IDENTIFIER (0)
#define TFF_SCOPE (1)
#define TFF_CHASE_TYPEDEF (1 << 1)
#define TFF_DECL_SPECIFIERS (1 << 2)
#define TFF_CLASS_KEY_OR_ENUM (1 << 3)
#define TFF_RETURN_TYPE (1 << 4)
#define TFF_FUNCTION_DEFAULT_ARGUMENTS (1 << 5)
#define TFF_EXCEPTION_SPECIFICATION (1 << 6)
#define TFF_TEMPLATE_HEADER (1 << 7)
#define TFF_TEMPLATE_NAME (1 << 8)
#define TFF_EXPR_IN_PARENS (1 << 9)
#define TFF_NO_FUNCTION_ARGUMENTS (1 << 10)
/* Returns the TEMPLATE_DECL associated to a TEMPLATE_TEMPLATE_PARM
node. */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE) \
((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \
? TYPE_TI_TEMPLATE (NODE) \
: TYPE_NAME (NODE))
/* in lex.c */
extern void init_reswords (void);
/* Indexed by TREE_CODE, these tables give C-looking names to
operators represented by TREE_CODES. For example,
opname_tab[(int) MINUS_EXPR] == "-". */
extern const char **opname_tab, **assignop_tab;
typedef struct operator_name_info_t GTY(())
{
/* The IDENTIFIER_NODE for the operator. */
tree identifier;
/* The name of the operator. */
const char *name;
/* The mangled name of the operator. */
const char *mangled_name;
/* The arity of the operator. */
int arity;
} operator_name_info_t;
/* A mapping from tree codes to operator name information. */
extern GTY(()) operator_name_info_t operator_name_info
[(int) LAST_CPLUS_TREE_CODE];
/* Similar, but for assignment operators. */
extern GTY(()) operator_name_info_t assignment_operator_name_info
[(int) LAST_CPLUS_TREE_CODE];
/* A type-qualifier, or bitmask thereof, using the TYPE_QUAL
constants. */
typedef int cp_cv_quals;
/* A storage class. */
typedef enum cp_storage_class {
/* sc_none must be zero so that zeroing a cp_decl_specifier_seq
sets the storage_class field to sc_none. */
sc_none = 0,
sc_auto,
sc_register,
sc_static,
sc_extern,
sc_mutable
} cp_storage_class;
/* An individual decl-specifier. */
typedef enum cp_decl_spec {
ds_first,
ds_signed = ds_first,
ds_unsigned,
ds_short,
ds_long,
ds_const,
ds_volatile,
ds_restrict,
ds_inline,
ds_virtual,
ds_explicit,
ds_friend,
ds_typedef,
ds_complex,
ds_thread,
ds_last
} cp_decl_spec;
/* A decl-specifier-seq. */
typedef struct cp_decl_specifier_seq {
/* The number of times each of the keywords has been seen. */
unsigned specs[(int) ds_last];
/* The primary type, if any, given by the decl-specifier-seq.
Modifiers, like "short", "const", and "unsigned" are not
reflected here. This field will be a TYPE, unless a typedef-name
was used, in which case it will be a TYPE_DECL. */
tree type;
/* The attributes, if any, provided with the specifier sequence. */
tree attributes;
/* If non-NULL, a built-in type that the user attempted to redefine
to some other type. */
tree redefined_builtin_type;
/* The storage class specified -- or sc_none if no storage class was
explicitly specified. */
cp_storage_class storage_class;
/* True iff TYPE_SPEC indicates a user-defined type. */
BOOL_BITFIELD user_defined_type_p : 1;
/* True iff multiple types were (erroneously) specified for this
decl-specifier-seq. */
BOOL_BITFIELD multiple_types_p : 1;
/* True iff multiple storage classes were (erroneously) specified
for this decl-specifier-seq or a combination of a storage class
with a typedef specifier. */
BOOL_BITFIELD conflicting_specifiers_p : 1;
/* True iff at least one decl-specifier was found. */
BOOL_BITFIELD any_specifiers_p : 1;
/* True iff "int" was explicitly provided. */
BOOL_BITFIELD explicit_int_p : 1;
/* True iff "char" was explicitly provided. */
BOOL_BITFIELD explicit_char_p : 1;
} cp_decl_specifier_seq;
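#if 0
/* Illustrative sketch, not part of this header: because `specs'
   counts keyword occurrences, repeated specifiers remain visible
   after parsing, e.g.:  */
if (declspecs->specs[(int) ds_long] == 2)
  ; /* `long' was written twice, as in `long long'.  */
if (declspecs->specs[(int) ds_typedef])
  ; /* The declaration is a typedef.  */
#endif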
/* The various kinds of declarators. */
typedef enum cp_declarator_kind {
cdk_id,
cdk_function,
cdk_array,
cdk_pointer,
cdk_reference,
cdk_ptrmem,
/* APPLE LOCAL blocks 6040305 (ch) */
cdk_block_pointer,
cdk_error
} cp_declarator_kind;
/* A declarator. */
typedef struct cp_declarator cp_declarator;
typedef struct cp_parameter_declarator cp_parameter_declarator;
/* A parameter, before it has been semantically analyzed. */
struct cp_parameter_declarator {
/* The next parameter, or NULL_TREE if none. */
cp_parameter_declarator *next;
/* The decl-specifiers-seq for the parameter. */
cp_decl_specifier_seq decl_specifiers;
/* The declarator for the parameter. */
cp_declarator *declarator;
/* The default-argument expression, or NULL_TREE, if none. */
tree default_argument;
/* True iff this is the first parameter in the list and the
parameter sequence ends with an ellipsis. */
bool ellipsis_p;
};
/* A declarator. */
struct cp_declarator {
/* The kind of declarator. */
cp_declarator_kind kind;
/* Attributes that apply to this declarator. */
tree attributes;
/* For all but cdk_id and cdk_error, the contained declarator. For
cdk_id and cdk_error, guaranteed to be NULL. */
cp_declarator *declarator;
location_t id_loc; /* Currently only set for cdk_id. */
union {
/* For identifiers. */
struct {
/* If non-NULL, the qualifying scope (a NAMESPACE_DECL or
*_TYPE) for this identifier. */
tree qualifying_scope;
/* The unqualified name of the entity -- an IDENTIFIER_NODE,
BIT_NOT_EXPR, or TEMPLATE_ID_EXPR. */
tree unqualified_name;
/* If this is the name of a function, what kind of special
function (if any). */
special_function_kind sfk;
} id;
/* For functions. */
struct {
/* The parameters to the function. */
cp_parameter_declarator *parameters;
/* The cv-qualifiers for the function. */
cp_cv_quals qualifiers;
/* The exception-specification for the function. */
tree exception_specification;
} function;
/* For arrays. */
struct {
/* The bounds to the array. */
tree bounds;
} array;
/* For cdk_pointer, cdk_reference, and cdk_ptrmem. */
struct {
/* The cv-qualifiers for the pointer. */
cp_cv_quals qualifiers;
/* For cdk_ptrmem, the class type containing the member. */
tree class_type;
} pointer;
/* APPLE LOCAL begin blocks 6040305 (ch) */
/* For cdk_block_pointer. */
struct {
/* The cv-qualifiers for the pointer. */
cp_cv_quals qualifiers;
} block_pointer;
/* APPLE LOCAL end blocks 6040305 (ch) */
} u;
};
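#if 0
/* Illustrative sketch, not part of this header: declarators nest
   through the `declarator' field until the cdk_id (or cdk_error)
   leaf, so the declared name is found by walking inward:  */
static tree
innermost_unqualified_name (const cp_declarator *d)
{
  while (d && d->kind != cdk_id && d->kind != cdk_error)
    d = d->declarator;
  return (d && d->kind == cdk_id) ? d->u.id.unqualified_name : NULL_TREE;
}
#endif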
/* A parameter list for a function with no parameters,
e.g. "int f(void)". */
extern cp_parameter_declarator *no_parameters;
/* in call.c */
extern bool check_dtor_name (tree, tree);
extern tree build_vfield_ref (tree, tree);
extern tree build_conditional_expr (tree, tree, tree);
extern tree build_addr_func (tree);
extern tree build_call (tree, tree);
extern bool null_ptr_cst_p (tree);
extern bool sufficient_parms_p (tree);
extern tree type_decays_to (tree);
extern tree build_user_type_conversion (tree, tree, int);
extern tree build_new_function_call (tree, tree, bool);
extern tree build_operator_new_call (tree, tree, tree *, tree *,
tree *);
extern tree build_new_method_call (tree, tree, tree, tree, int,
tree *);
extern tree build_special_member_call (tree, tree, tree, tree, int);
extern tree build_new_op (enum tree_code, int, tree, tree, tree, bool *);
extern tree build_op_delete_call (enum tree_code, tree, tree, bool, tree, tree);
extern bool can_convert (tree, tree);
extern bool can_convert_arg (tree, tree, tree, int);
extern bool can_convert_arg_bad (tree, tree, tree);
extern bool enforce_access (tree, tree, tree);
extern tree convert_default_arg (tree, tree, tree, int);
extern tree convert_arg_to_ellipsis (tree);
extern tree build_x_va_arg (tree, tree);
extern tree cxx_type_promotes_to (tree);
extern tree type_passed_as (tree);
extern tree convert_for_arg_passing (tree, tree);
extern bool is_properly_derived_from (tree, tree);
extern tree initialize_reference (tree, tree, tree, tree *);
extern tree make_temporary_var_for_ref_to_temp (tree, tree);
extern tree strip_top_quals (tree);
extern tree perform_implicit_conversion (tree, tree);
extern tree perform_direct_initialization_if_possible (tree, tree, bool);
extern tree in_charge_arg_for_name (tree);
extern tree build_cxx_call (tree, tree);
#ifdef ENABLE_CHECKING
extern void validate_conversion_obstack (void);
#endif /* ENABLE_CHECKING */
/* in class.c */
extern tree build_base_path (enum tree_code, tree,
tree, int);
extern tree convert_to_base (tree, tree, bool, bool);
extern tree convert_to_base_statically (tree, tree);
extern tree build_vtbl_ref (tree, tree);
extern tree build_vfn_ref (tree, tree);
extern tree get_vtable_decl (tree, int);
extern void resort_type_method_vec (void *, void *,
gt_pointer_operator, void *);
extern bool add_method (tree, tree, tree);
extern bool currently_open_class (tree);
extern tree currently_open_derived_class (tree);
extern tree finish_struct (tree, tree);
extern void finish_struct_1 (tree);
extern int resolves_to_fixed_type_p (tree, int *);
extern void init_class_processing (void);
extern int is_empty_class (tree);
extern void pushclass (tree);
extern void popclass (void);
extern void push_nested_class (tree);
extern void pop_nested_class (void);
extern int current_lang_depth (void);
extern void push_lang_context (tree);
extern void pop_lang_context (void);
extern tree instantiate_type (tree, tree, tsubst_flags_t);
extern void print_class_statistics (void);
extern void cxx_print_statistics (void);
extern void cxx_print_xnode (FILE *, tree, int);
extern void cxx_print_decl (FILE *, tree, int);
extern void cxx_print_type (FILE *, tree, int);
extern void cxx_print_identifier (FILE *, tree, int);
extern void cxx_print_error_function (struct diagnostic_context *,
const char *);
extern void build_self_reference (void);
extern int same_signature_p (tree, tree);
extern void maybe_add_class_template_decl_list (tree, tree, int);
extern void unreverse_member_declarations (tree);
extern void invalidate_class_lookup_cache (void);
extern void maybe_note_name_used_in_class (tree, tree);
extern void note_name_declared_in_class (tree, tree);
extern tree get_vtbl_decl_for_binfo (tree);
extern void debug_class (tree);
extern void debug_thunks (tree);
extern tree cp_fold_obj_type_ref (tree, tree);
extern void set_linkage_according_to_type (tree, tree);
extern void determine_key_method (tree);
extern void check_for_override (tree, tree);
extern void push_class_stack (void);
extern void pop_class_stack (void);
/* in cvt.c */
extern tree convert_to_reference (tree, tree, int, int, tree);
extern tree convert_from_reference (tree);
extern tree force_rvalue (tree);
extern tree ocp_convert (tree, tree, int, int);
extern tree cp_convert (tree, tree);
extern tree convert_to_void (tree, const char */*implicit context*/);
extern tree convert_force (tree, tree, int);
extern tree build_expr_type_conversion (int, tree, bool);
extern tree type_promotes_to (tree);
extern tree perform_qualification_conversions (tree, tree);
extern void clone_function_decl (tree, int);
extern void adjust_clone_args (tree);
/* decl.c */
extern tree poplevel (int, int, int);
extern void insert_block (tree);
extern tree pushdecl (tree);
extern tree pushdecl_maybe_friend (tree, bool);
extern void cxx_init_decl_processing (void);
enum cp_tree_node_structure_enum cp_tree_node_structure
(union lang_tree_node *);
extern bool cxx_mark_addressable (tree);
extern void cxx_push_function_context (struct function *);
extern void cxx_pop_function_context (struct function *);
extern void maybe_push_cleanup_level (tree);
extern void finish_scope (void);
extern void push_switch (tree);
extern void pop_switch (void);
extern tree pushtag (tree, tree, tag_scope);
extern tree make_anon_name (void);
extern int decls_match (tree, tree);
extern tree duplicate_decls (tree, tree, bool);
extern tree pushdecl_top_level_maybe_friend (tree, bool);
extern tree pushdecl_top_level_and_finish (tree, tree);
extern tree declare_local_label (tree);
extern tree define_label (location_t, tree);
extern void check_goto (tree);
extern bool check_omp_return (void);
extern tree make_typename_type (tree, tree, enum tag_types, tsubst_flags_t);
extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t);
extern tree check_for_out_of_scope_variable (tree);
extern tree build_library_fn (tree, tree);
extern tree build_library_fn_ptr (const char *, tree);
extern tree build_cp_library_fn_ptr (const char *, tree);
extern tree push_library_fn (tree, tree);
extern tree push_void_library_fn (tree, tree);
extern tree push_throw_library_fn (tree, tree);
extern tree check_tag_decl (cp_decl_specifier_seq *);
extern tree shadow_tag (cp_decl_specifier_seq *);
extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *);
/* APPLE LOCAL 6339747 */
extern tree grokblockdecl (cp_decl_specifier_seq *, const cp_declarator *);
extern tree start_decl (const cp_declarator *, cp_decl_specifier_seq *, int, tree, tree, tree *);
extern void start_decl_1 (tree, bool);
extern void cp_finish_decl (tree, tree, bool, tree, int);
extern void finish_decl (tree, tree, tree);
extern int cp_complete_array_type (tree *, tree, bool);
extern tree build_ptrmemfunc_type (tree);
extern tree build_ptrmem_type (tree, tree);
/* the grokdeclarator prototype is in decl.h */
extern tree build_this_parm (tree, cp_cv_quals);
extern int copy_fn_p (tree);
extern tree get_scope_of_declarator (const cp_declarator *);
extern void grok_special_member_properties (tree);
extern int grok_ctor_properties (tree, tree);
extern bool grok_op_properties (tree, bool);
extern tree xref_tag (enum tag_types, tree, tag_scope, bool);
extern tree xref_tag_from_type (tree, tree, tag_scope);
extern bool xref_basetypes (tree, tree);
extern tree start_enum (tree);
extern void finish_enum (tree);
extern void build_enumerator (tree, tree, tree);
extern void start_preparsed_function (tree, tree, int);
extern int start_function (cp_decl_specifier_seq *, const cp_declarator *, tree);
extern tree begin_function_body (void);
extern void finish_function_body (tree);
extern tree finish_function (int);
extern tree start_method (cp_decl_specifier_seq *, const cp_declarator *, tree);
extern tree finish_method (tree);
extern void maybe_register_incomplete_var (tree);
extern void complete_vars (tree);
extern void finish_stmt (void);
extern void print_other_binding_stack (struct cp_binding_level *);
extern void revert_static_member_fn (tree);
extern void fixup_anonymous_aggr (tree);
extern int check_static_variable_definition (tree, tree);
extern tree compute_array_index_type (tree, tree);
extern tree check_default_argument (tree, tree);
typedef int (*walk_namespaces_fn) (tree, void *);
extern int walk_namespaces (walk_namespaces_fn,
void *);
extern int wrapup_globals_for_namespace (tree, void *);
extern tree create_implicit_typedef (tree, tree);
extern tree maybe_push_decl (tree);
extern tree force_target_expr (tree, tree);
extern tree build_target_expr_with_type (tree, tree);
extern int local_variable_p (tree);
extern int nonstatic_local_decl_p (tree);
extern tree register_dtor_fn (tree);
extern tmpl_spec_kind current_tmpl_spec_kind (int);
extern tree cp_fname_init (const char *, tree *);
extern tree builtin_function (const char *name, tree type,
int code,
enum built_in_class cl,
const char *libname,
tree attrs);
extern tree check_elaborated_type_specifier (enum tag_types, tree, bool);
extern void warn_extern_redeclared_static (tree, tree);
extern const char *cxx_comdat_group (tree);
extern bool cp_missing_noreturn_ok_p (tree);
extern void initialize_artificial_var (tree, tree);
extern tree check_var_type (tree, tree);
extern tree reshape_init (tree, tree);
/* in decl2.c */
extern bool check_java_method (tree);
extern tree build_memfn_type (tree, tree, cp_cv_quals);
extern void maybe_retrofit_in_chrg (tree);
extern void maybe_make_one_only (tree);
extern void grokclassfn (tree, tree,
enum overload_flags);
extern tree grok_array_decl (tree, tree);
extern tree delete_sanity (tree, tree, bool, int);
extern tree check_classfn (tree, tree, tree);
extern void check_member_template (tree);
extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *,
tree, bool, tree, tree);
extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *,
tree);
extern void cplus_decl_attributes (tree *, tree, int);
extern void finish_anon_union (tree);
extern void cp_finish_file (void);
extern tree coerce_new_type (tree);
extern tree coerce_delete_type (tree);
extern void comdat_linkage (tree);
extern void determine_visibility (tree);
extern void constrain_class_visibility (tree);
extern void update_member_visibility (tree);
extern void import_export_decl (tree);
extern tree build_cleanup (tree);
extern tree build_offset_ref_call_from_tree (tree, tree);
extern void check_default_args (tree);
extern void mark_used (tree);
extern void finish_static_data_member_decl (tree, tree, bool, tree, int);
extern tree cp_build_parm_decl (tree, tree);
extern tree get_guard (tree);
extern tree get_guard_cond (tree);
extern tree set_guard (tree);
extern tree cxx_callgraph_analyze_expr (tree *, int *, tree);
extern void mark_needed (tree);
extern bool decl_needed_p (tree);
extern void note_vague_linkage_fn (tree);
extern tree build_artificial_parm (tree, tree);
/* in error.c */
extern void init_error (void);
extern const char *type_as_string (tree, int);
extern const char *decl_as_string (tree, int);
extern const char *expr_as_string (tree, int);
extern const char *lang_decl_name (tree, int);
extern const char *language_to_string (enum languages);
extern const char *class_key_or_enum_as_string (tree);
extern void print_instantiation_context (void);
/* in except.c */
extern void init_exception_processing (void);
extern tree expand_start_catch_block (tree);
extern void expand_end_catch_block (void);
extern tree build_exc_ptr (void);
extern tree build_throw (tree);
extern int nothrow_libfn_p (tree);
extern void check_handlers (tree);
extern void choose_personality_routine (enum languages);
extern tree eh_type_info (tree);
/* in expr.c */
extern rtx cxx_expand_expr (tree, rtx,
enum machine_mode,
int, rtx *);
extern tree cplus_expand_constant (tree);
/* friend.c */
extern int is_friend (tree, tree);
extern void make_friend_class (tree, tree, bool);
extern void add_friend (tree, tree, bool);
extern tree do_friend (tree, tree, tree, tree, enum overload_flags, bool);
/* in init.c */
extern tree expand_member_init (tree);
extern void emit_mem_initializers (tree);
extern tree build_aggr_init (tree, tree, int);
extern int is_aggr_type (tree, int);
extern tree get_type_value (tree);
extern tree build_zero_init (tree, tree, bool);
extern tree build_offset_ref (tree, tree, bool);
extern tree build_new (tree, tree, tree, tree, int);
extern tree build_vec_init (tree, tree, tree, bool, int);
extern tree build_delete (tree, tree,
special_function_kind,
int, int);
extern void push_base_cleanups (void);
extern tree build_vec_delete (tree, tree,
special_function_kind, int);
extern tree create_temporary_var (tree);
extern void initialize_vtbl_ptrs (tree);
extern tree build_java_class_ref (tree);
extern tree integral_constant_value (tree);
/* in lex.c */
extern void cxx_dup_lang_specific_decl (tree);
extern void yyungetc (int, int);
extern tree unqualified_name_lookup_error (tree);
extern tree unqualified_fn_lookup_error (tree);
extern tree build_lang_decl (enum tree_code, tree, tree);
extern void retrofit_lang_decl (tree);
extern tree copy_decl (tree);
extern tree copy_type (tree);
extern tree cxx_make_type (enum tree_code);
extern tree make_aggr_type (enum tree_code);
extern void yyerror (const char *);
extern void yyhook (int);
extern bool cxx_init (void);
extern void cxx_finish (void);
extern bool in_main_input_context (void);
/* in method.c */
extern void init_method (void);
extern tree make_thunk (tree, bool, tree, tree);
extern void finish_thunk (tree);
extern void use_thunk (tree, bool);
extern void synthesize_method (tree);
extern tree lazily_declare_fn (special_function_kind,
tree);
extern tree skip_artificial_parms_for (tree, tree);
extern tree make_alias_for (tree, tree);
/* In optimize.c */
extern bool maybe_clone_body (tree);
/* in pt.c */
extern void check_template_shadow (tree);
extern tree get_innermost_template_args (tree, int);
extern void maybe_begin_member_template_processing (tree);
extern void maybe_end_member_template_processing (void);
extern tree finish_member_template_decl (tree);
extern void begin_template_parm_list (void);
extern bool begin_specialization (void);
extern void reset_specialization (void);
extern void end_specialization (void);
extern void begin_explicit_instantiation (void);
extern void end_explicit_instantiation (void);
extern tree check_explicit_specialization (tree, tree, int, int);
extern tree process_template_parm (tree, tree, bool);
extern tree end_template_parm_list (tree);
extern void end_template_decl (void);
extern tree push_template_decl (tree);
extern tree push_template_decl_real (tree, bool);
extern bool redeclare_class_template (tree, tree);
extern tree lookup_template_class (tree, tree, tree, tree,
int, tsubst_flags_t);
extern tree lookup_template_function (tree, tree);
extern int uses_template_parms (tree);
extern int uses_template_parms_level (tree, int);
extern tree instantiate_class_template (tree);
extern tree instantiate_template (tree, tree, tsubst_flags_t);
extern int fn_type_unification (tree, tree, tree, tree,
tree, unification_kind_t, int);
extern void mark_decl_instantiated (tree, int);
extern int more_specialized_fn (tree, tree, int);
extern void do_decl_instantiation (tree, tree);
extern void do_type_instantiation (tree, tree, tsubst_flags_t);
extern tree instantiate_decl (tree, int, bool);
extern int comp_template_parms (tree, tree);
extern int template_class_depth (tree);
extern int is_specialization_of (tree, tree);
extern bool is_specialization_of_friend (tree, tree);
extern int comp_template_args (tree, tree);
extern tree maybe_process_partial_specialization (tree);
extern tree most_specialized_instantiation (tree);
extern void print_candidates (tree);
extern void instantiate_pending_templates (int);
extern tree tsubst_default_argument (tree, tree, tree);
extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t,
tree, bool, bool);
extern tree most_general_template (tree);
extern tree get_mostly_instantiated_function_type (tree);
extern int problematic_instantiation_changed (void);
extern void record_last_problematic_instantiation (void);
extern tree current_instantiation (void);
extern tree maybe_get_template_decl_from_type_decl (tree);
extern int processing_template_parmlist;
extern bool dependent_type_p (tree);
extern bool any_dependent_template_arguments_p (tree);
extern bool dependent_template_p (tree);
extern bool dependent_template_id_p (tree, tree);
extern bool type_dependent_expression_p (tree);
extern bool any_type_dependent_arguments_p (tree);
extern bool value_dependent_expression_p (tree);
extern bool any_value_dependent_elements_p (tree);
extern tree resolve_typename_type (tree, bool);
extern tree template_for_substitution (tree);
extern tree build_non_dependent_expr (tree);
extern tree build_non_dependent_args (tree);
extern bool reregister_specialization (tree, tree, tree);
extern tree fold_non_dependent_expr (tree);
extern bool explicit_class_specialization_p (tree);
extern tree outermost_tinst_level (void);
/* in repo.c */
extern void init_repo (void);
extern int repo_emit_p (tree);
extern bool repo_export_class_p (tree);
extern void finish_repo (void);
/* in rtti.c */
/* A vector of all tinfo decls that haven't been emitted yet. */
extern GTY(()) VEC(tree,gc) *unemitted_tinfo_decls;
extern void init_rtti_processing (void);
extern tree build_typeid (tree);
extern tree get_tinfo_decl (tree);
extern tree get_typeid (tree);
extern tree build_dynamic_cast (tree, tree);
extern void emit_support_tinfos (void);
extern bool emit_tinfo_decl (tree);
/* in search.c */
extern bool accessible_base_p (tree, tree, bool);
extern tree lookup_base (tree, tree, base_access,
base_kind *);
extern tree dcast_base_hint (tree, tree);
extern int accessible_p (tree, tree, bool);
extern tree lookup_field_1 (tree, tree, bool);
extern tree lookup_field (tree, tree, int, bool);
extern int lookup_fnfields_1 (tree, tree);
extern int class_method_index_for_fn (tree, tree);
extern tree lookup_fnfields (tree, tree, int);
extern tree lookup_member (tree, tree, int, bool);
extern int look_for_overrides (tree, tree);
extern void get_pure_virtuals (tree);
extern void maybe_suppress_debug_info (tree);
extern void note_debug_info_needed (tree);
extern void print_search_statistics (void);
extern void reinit_search_statistics (void);
extern tree current_scope (void);
extern int at_function_scope_p (void);
extern bool at_class_scope_p (void);
extern bool at_namespace_scope_p (void);
extern tree context_for_name_lookup (tree);
extern tree lookup_conversions (tree);
extern tree binfo_from_vbase (tree);
extern tree binfo_for_vbase (tree, tree);
extern tree look_for_overrides_here (tree, tree);
#define dfs_skip_bases ((tree)1)
extern tree dfs_walk_all (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
extern tree dfs_walk_once (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
extern tree binfo_via_virtual (tree, tree);
extern tree build_baselink (tree, tree, tree, tree);
extern tree adjust_result_of_qualified_name_lookup
(tree, tree, tree);
extern tree copied_binfo (tree, tree);
extern tree original_binfo (tree, tree);
extern int shared_member_p (tree);
/* The representation of a deferred access check. */
typedef struct deferred_access_check GTY(())
{
/* The base class in which the declaration is referenced. */
tree binfo;
/* The declaration whose access must be checked. */
tree decl;
/* The declaration that should be used in the error message. */
tree diag_decl;
} deferred_access_check;
DEF_VEC_O(deferred_access_check);
DEF_VEC_ALLOC_O(deferred_access_check,gc);
/* in semantics.c */
extern void push_deferring_access_checks (deferring_kind);
extern void resume_deferring_access_checks (void);
extern void stop_deferring_access_checks (void);
extern void pop_deferring_access_checks (void);
extern VEC (deferred_access_check,gc)* get_deferred_access_checks (void);
extern void pop_to_parent_deferring_access_checks (void);
extern void perform_access_checks (VEC (deferred_access_check,gc)*);
extern void perform_deferred_access_checks (void);
extern void perform_or_defer_access_check (tree, tree, tree);
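/* Illustrative sketch, not part of this header: a tentative parse
   brackets its access checks with these routines, e.g.

       push_deferring_access_checks (dk_deferred);
       ... parse ...
       perform_deferred_access_checks ();
       pop_deferring_access_checks ();
*/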
extern int stmts_are_full_exprs_p (void);
extern void init_cp_semantics (void);
extern tree do_poplevel (tree);
extern void add_decl_expr (tree);
extern tree finish_expr_stmt (tree);
extern tree begin_if_stmt (void);
extern void finish_if_stmt_cond (tree, tree);
extern tree finish_then_clause (tree);
extern void begin_else_clause (tree);
extern void finish_else_clause (tree);
extern void finish_if_stmt (tree);
/* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
extern tree begin_while_stmt (tree);
/* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
extern void finish_while_stmt_cond (tree, tree);
extern void finish_while_stmt (tree);
/* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
extern tree begin_do_stmt (tree);
/* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
extern void finish_do_body (tree);
extern void finish_do_stmt (tree, tree);
extern tree finish_return_stmt (tree);
/* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \
extern tree begin_for_stmt (tree);
/* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \
extern void finish_for_init_stmt (tree);
extern void finish_for_cond (tree, tree);
extern void finish_for_expr (tree, tree);
extern void finish_for_stmt (tree);
extern tree finish_break_stmt (void);
extern tree finish_continue_stmt (void);
extern tree begin_switch_stmt (void);
extern void finish_switch_cond (tree, tree);
extern void finish_switch_stmt (tree);
extern tree finish_case_label (tree, tree);
extern tree finish_goto_stmt (tree);
extern tree begin_try_block (void);
extern void finish_try_block (tree);
extern tree begin_eh_spec_block (void);
extern void finish_eh_spec_block (tree, tree);
extern void finish_handler_sequence (tree);
extern tree begin_function_try_block (tree *);
extern void finish_function_try_block (tree);
extern void finish_function_handler_sequence (tree, tree);
extern void finish_cleanup_try_block (tree);
extern tree begin_handler (void);
extern void finish_handler_parms (tree, tree);
extern void finish_handler (tree);
extern void finish_cleanup (tree, tree);
enum {
BCS_NO_SCOPE = 1,
BCS_TRY_BLOCK = 2,
BCS_FN_BODY = 4
};
extern tree begin_compound_stmt (unsigned int);
extern void finish_compound_stmt (tree);
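/* Illustrative sketch, not part of this header: e.g. wrapping the
   statements of a function body:

       tree body = begin_compound_stmt (BCS_FN_BODY);
       ... add statements ...
       finish_compound_stmt (body);
*/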
extern tree finish_asm_stmt (int, tree, tree, tree, tree);
extern tree finish_label_stmt (tree);
extern void finish_label_decl (tree);
extern tree finish_parenthesized_expr (tree);
extern tree finish_non_static_data_member (tree, tree, tree);
extern tree begin_stmt_expr (void);
extern tree finish_stmt_expr_expr (tree, tree);
extern tree finish_stmt_expr (tree, bool);
extern tree perform_koenig_lookup (tree, tree);
extern tree finish_call_expr (tree, tree, bool, bool);
extern tree finish_increment_expr (tree, enum tree_code);
extern tree finish_this_expr (void);
extern tree finish_pseudo_destructor_expr (tree, tree, tree);
extern tree finish_unary_op_expr (enum tree_code, tree);
extern tree finish_compound_literal (tree, VEC(constructor_elt,gc) *);
extern tree finish_fname (tree);
extern void finish_translation_unit (void);
extern tree finish_template_type_parm (tree, tree);
extern tree finish_template_template_parm (tree, tree);
extern tree begin_class_definition (tree, tree);
extern void finish_template_decl (tree);
extern tree finish_template_type (tree, tree, int);
extern tree finish_base_specifier (tree, tree, bool);
extern void finish_member_declaration (tree);
extern void qualified_name_lookup_error (tree, tree, tree);
extern void check_template_keyword (tree);
extern tree finish_id_expression (tree, tree, tree,
cp_id_kind *,
bool, bool, bool *,
bool, bool, bool, bool,
const char **);
extern tree finish_typeof (tree);
extern tree finish_offsetof (tree);
extern void finish_decl_cleanup (tree, tree);
extern void finish_eh_cleanup (tree);
extern void expand_body (tree);
extern void finish_mem_initializers (tree);
extern tree check_template_template_default_arg (tree);
extern void expand_or_defer_fn (tree);
extern void check_accessibility_of_qualified_id (tree, tree, tree);
extern tree finish_qualified_id_expr (tree, tree, bool, bool,
bool, bool);
extern void simplify_aggr_init_expr (tree *);
extern void finalize_nrv (tree *, tree, tree);
extern void note_decl_for_pch (tree);
extern tree finish_omp_clauses (tree);
extern void finish_omp_threadprivate (tree);
extern tree begin_omp_structured_block (void);
extern tree finish_omp_structured_block (tree);
extern tree begin_omp_parallel (void);
extern tree finish_omp_parallel (tree, tree);
extern tree finish_omp_for (location_t, tree, tree,
tree, tree, tree, tree);
extern void finish_omp_atomic (enum tree_code, tree, tree);
extern void finish_omp_barrier (void);
extern void finish_omp_flush (void);
extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree);
extern tree cxx_omp_clause_default_ctor (tree, tree);
extern tree cxx_omp_clause_copy_ctor (tree, tree, tree);
extern tree cxx_omp_clause_assign_op (tree, tree, tree);
extern tree cxx_omp_clause_dtor (tree, tree);
extern bool cxx_omp_privatize_by_reference (tree);
extern tree baselink_for_fns (tree);
/* in tree.c */
extern void lang_check_failed (const char *, int,
const char *) ATTRIBUTE_NORETURN;
extern tree stabilize_expr (tree, tree *);
extern void stabilize_call (tree, tree *);
extern bool stabilize_init (tree, tree *);
extern tree add_stmt_to_compound (tree, tree);
extern tree cxx_maybe_build_cleanup (tree);
extern void init_tree (void);
extern int pod_type_p (tree);
extern bool class_tmpl_impl_spec_p (tree);
extern int zero_init_p (tree);
extern tree canonical_type_variant (tree);
extern tree copy_binfo (tree, tree, tree,
tree *, int);
extern int member_p (tree);
extern cp_lvalue_kind real_lvalue_p (tree);
extern bool builtin_valid_in_constant_expr_p (tree);
extern tree build_min (enum tree_code, tree, ...);
extern tree build_min_nt (enum tree_code, ...);
extern tree build_min_non_dep (enum tree_code, tree, ...);
extern tree build_cplus_new (tree, tree);
extern tree get_target_expr (tree);
extern tree build_cplus_array_type (tree, tree);
extern tree hash_tree_cons (tree, tree, tree);
extern tree hash_tree_chain (tree, tree);
extern tree build_qualified_name (tree, tree, tree, bool);
extern int is_overloaded_fn (tree);
extern tree get_first_fn (tree);
extern tree ovl_cons (tree, tree);
extern tree build_overload (tree, tree);
extern const char *cxx_printable_name (tree, int);
extern tree build_exception_variant (tree, tree);
extern tree bind_template_template_parm (tree, tree);
extern tree array_type_nelts_total (tree);
extern tree array_type_nelts_top (tree);
extern tree break_out_target_exprs (tree);
extern tree get_type_decl (tree);
extern tree decl_namespace_context (tree);
extern bool decl_anon_ns_mem_p (tree);
extern tree lvalue_type (tree);
extern tree error_type (tree);
extern int varargs_function_p (tree);
extern bool really_overloaded_fn (tree);
extern bool cp_tree_equal (tree, tree);
extern tree no_linkage_check (tree, bool);
extern void debug_binfo (tree);
extern tree build_dummy_object (tree);
extern tree maybe_dummy_object (tree, tree *);
extern int is_dummy_object (tree);
extern const struct attribute_spec cxx_attribute_table[];
extern tree make_ptrmem_cst (tree, tree);
extern tree cp_build_type_attribute_variant (tree, tree);
extern tree cp_build_qualified_type_real (tree, int, tsubst_flags_t);
#define cp_build_qualified_type(TYPE, QUALS) \
cp_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error)
extern special_function_kind special_function_p (tree);
extern int count_trees (tree);
extern int char_type_p (tree);
extern void verify_stmt_tree (tree);
extern linkage_kind decl_linkage (tree);
extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn,
void*, struct pointer_set_t*);
extern int cp_cannot_inline_tree_fn (tree*);
extern tree cp_add_pending_fn_decls (void*,tree);
extern int cp_auto_var_in_fn_p (tree,tree);
extern tree fold_if_not_in_template (tree);
extern tree rvalue (tree);
extern tree convert_bitfield_to_declared_type (tree);
extern tree cp_save_expr (tree);
extern bool cast_valid_in_integral_constant_expression_p (tree);
/* in typeck.c */
extern int string_conv_p (tree, tree, int);
extern tree cp_truthvalue_conversion (tree);
extern tree condition_conversion (tree);
extern tree require_complete_type (tree);
extern tree complete_type (tree);
extern tree complete_type_or_else (tree, tree);
extern int type_unknown_p (tree);
extern bool comp_except_specs (tree, tree, bool);
extern bool comptypes (tree, tree, int);
extern bool compparms (tree, tree);
extern int comp_cv_qualification (tree, tree);
extern int comp_cv_qual_signature (tree, tree);
extern tree cxx_sizeof_or_alignof_expr (tree, enum tree_code);
extern tree cxx_sizeof_or_alignof_type (tree, enum tree_code, bool);
#define cxx_sizeof_nowarn(T) cxx_sizeof_or_alignof_type (T, SIZEOF_EXPR, false)
extern tree inline_conversion (tree);
extern tree is_bitfield_expr_with_lowered_type (tree);
extern tree unlowered_expr_type (tree);
extern tree decay_conversion (tree);
extern tree build_class_member_access_expr (tree, tree, tree, bool);
extern tree finish_class_member_access_expr (tree, tree, bool);
extern tree build_x_indirect_ref (tree, const char *);
extern tree build_indirect_ref (tree, const char *);
extern tree build_array_ref (tree, tree);
extern tree get_member_function_from_ptrfunc (tree *, tree);
extern tree build_x_binary_op (enum tree_code, tree,
enum tree_code, tree,
enum tree_code, bool *);
extern tree build_x_unary_op (enum tree_code, tree);
extern tree unary_complex_lvalue (enum tree_code, tree);
extern tree build_x_conditional_expr (tree, tree, tree);
extern tree build_x_compound_expr_from_list (tree, const char *);
extern tree build_x_compound_expr (tree, tree);
extern tree build_compound_expr (tree, tree);
extern tree build_static_cast (tree, tree);
extern tree build_reinterpret_cast (tree, tree);
extern tree build_const_cast (tree, tree);
extern tree build_c_cast (tree, tree);
extern tree build_x_modify_expr (tree, enum tree_code, tree);
extern tree build_modify_expr (tree, enum tree_code, tree);
extern tree convert_for_initialization (tree, tree, tree, int,
const char *, tree, int);
extern int comp_ptr_ttypes (tree, tree);
extern bool comp_ptr_ttypes_const (tree, tree);
extern int ptr_reasonably_similar (tree, tree);
extern tree build_ptrmemfunc (tree, tree, int, bool);
extern int cp_type_quals (tree);
extern bool cp_type_readonly (tree);
extern bool cp_has_mutable_p (tree);
extern bool at_least_as_qualified_p (tree, tree);
extern void cp_apply_type_quals_to_decl (int, tree);
extern tree build_ptrmemfunc1 (tree, tree, tree);
extern void expand_ptrmemfunc_cst (tree, tree *, tree *);
extern tree type_after_usual_arithmetic_conversions (tree, tree);
extern tree composite_pointer_type (tree, tree, tree, tree,
const char*);
extern tree merge_types (tree, tree);
extern tree check_return_expr (tree, bool *);
#define cp_build_binary_op(code, arg1, arg2) \
build_binary_op(code, arg1, arg2, 1)
#define cxx_sizeof(T) cxx_sizeof_or_alignof_type (T, SIZEOF_EXPR, true)
extern tree build_ptrmemfunc_access_expr (tree, tree);
extern tree build_address (tree);
extern tree build_nop (tree, tree);
extern tree non_reference (tree);
extern tree lookup_anon_field (tree, tree);
extern bool invalid_nonstatic_memfn_p (tree);
extern tree convert_member_func_to_ptr (tree, tree);
extern tree convert_ptrmem (tree, tree, bool, bool);
extern int lvalue_or_else (tree, enum lvalue_use);
extern int lvalue_p (tree);
/* in typeck2.c */
extern void require_complete_eh_spec_types (tree, tree);
extern void cxx_incomplete_type_diagnostic (tree, tree, int);
#undef cxx_incomplete_type_error
extern void cxx_incomplete_type_error (tree, tree);
#define cxx_incomplete_type_error(V,T) \
(cxx_incomplete_type_diagnostic ((V), (T), 0))
extern tree error_not_base_type (tree, tree);
extern tree binfo_or_else (tree, tree);
extern void readonly_error (tree, const char *, int);
extern void complete_type_check_abstract (tree);
extern int abstract_virtuals_error (tree, tree);
extern tree store_init_value (tree, tree);
extern tree digest_init (tree, tree);
extern tree build_scoped_ref (tree, tree, tree *);
extern tree build_x_arrow (tree);
extern tree build_m_component_ref (tree, tree);
extern tree build_functional_cast (tree, tree);
extern tree add_exception_specifier (tree, tree, int);
extern tree merge_exception_specifiers (tree, tree);
/* in mangle.c */
extern void init_mangle (void);
extern void mangle_decl (tree);
extern const char *mangle_type_string (tree);
extern tree mangle_typeinfo_for_type (tree);
extern tree mangle_typeinfo_string_for_type (tree);
extern tree mangle_vtbl_for_type (tree);
extern tree mangle_vtt_for_type (tree);
extern tree mangle_ctor_vtbl_for_type (tree, tree);
extern tree mangle_thunk (tree, int, tree, tree);
extern tree mangle_conv_op_name_for_type (tree);
extern tree mangle_guard_variable (tree);
extern tree mangle_ref_init_variable (tree);
/* in dump.c */
extern bool cp_dump_tree (void *, tree);
/* In cp/cp-objcp-common.c. */
extern HOST_WIDE_INT cxx_get_alias_set (tree);
extern bool cxx_warn_unused_global_decl (tree);
extern tree cp_expr_size (tree);
extern size_t cp_tree_size (enum tree_code);
extern bool cp_var_mod_type_p (tree, tree);
extern void cxx_initialize_diagnostics (struct diagnostic_context *);
extern int cxx_types_compatible_p (tree, tree);
extern void init_shadowed_var_for_decl (void);
extern tree cxx_staticp (tree);
/* in cp-gimplify.c */
extern int cp_gimplify_expr (tree *, tree *, tree *);
extern void cp_genericize (tree);
/* -- end of C++ */
/* In order for the format checking to accept the C++ frontend
diagnostic framework extensions, you must include this file before
toplev.h, not after. We override the definition of GCC_DIAG_STYLE
in c-common.h. */
#undef GCC_DIAG_STYLE
#define GCC_DIAG_STYLE __gcc_cxxdiag__
#if GCC_VERSION >= 4001
#define ATTRIBUTE_GCC_CXXDIAG(m, n) __attribute__ ((__format__ (GCC_DIAG_STYLE, m, n))) ATTRIBUTE_NONNULL(m)
#else
#define ATTRIBUTE_GCC_CXXDIAG(m, n) ATTRIBUTE_NONNULL(m)
#endif
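/* For example, a consumer of the C++ diagnostic extensions would order its
   includes as follows (an illustrative sketch, not a file in the tree):

     #include "cp-tree.h"
     #include "toplev.h"  */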
extern void cp_cpp_error (cpp_reader *, int,
const char *, va_list *)
ATTRIBUTE_GCC_CXXDIAG(3,0);
/* APPLE LOCAL radar 5741070 */
extern tree c_return_interface_record_type (tree);
/* APPLE LOCAL begin blocks 6040305 (cg) */
extern cp_declarator* make_block_pointer_declarator (tree, cp_cv_quals,
cp_declarator *);
/* APPLE LOCAL end blocks 6040305 (cg) */
#endif /* ! GCC_CP_TREE_H */
|
image.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/animate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/delegate.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/timer.h"
#include "magick/timer-private.h"
#include "magick/token.h"
#include "magick/token-private.h"
#include "magick/utility.h"
#include "magick/version.h"
#include "magick/xwindow-private.h"
/*
Constant declaration.
*/
const char
BackgroundColor[] = "#ffffff", /* white */
BorderColor[] = "#dfdfdf", /* gray */
DefaultTileFrame[] = "15x15+3+3",
DefaultTileGeometry[] = "120x120+4+3>",
DefaultTileLabel[] = "%f\n%G\n%b",
ForegroundColor[] = "#000", /* black */
LoadImageTag[] = "Load/Image",
LoadImagesTag[] = "Load/Images",
MatteColor[] = "#bdbdbd", /* gray */
PSDensityGeometry[] = "72.0x72.0",
PSPageGeometry[] = "612x792",
SaveImageTag[] = "Save/Image",
SaveImagesTag[] = "Save/Images",
TransparentColor[] = "#00000000"; /* transparent black */
const double
DefaultResolution = 72.0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
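% For example, a minimal allocate/use/release sketch (error handling elided;
% illustrative only):
%
%    ImageInfo *image_info = AcquireImageInfo();
%    Image *image = AcquireImage(image_info);
%    ...
%    image = DestroyImage(image);
%    image_info = DestroyImageInfo(image_info);
%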
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info)
{
const char
*option;
Image
*image;
MagickStatusType
flags;
/*
Allocate image structure.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
image=(Image *) AcquireCriticalMemory(sizeof(*image));
(void) memset(image,0,sizeof(*image));
/*
Initialize Image structure.
*/
(void) CopyMagickString(image->magick,"MIFF",MaxTextExtent);
image->storage_class=DirectClass;
image->depth=MAGICKCORE_QUANTUM_DEPTH;
image->colorspace=sRGBColorspace;
image->rendering_intent=PerceptualIntent;
image->gamma=1.000f/2.200f;
image->chromaticity.red_primary.x=0.6400f;
image->chromaticity.red_primary.y=0.3300f;
image->chromaticity.red_primary.z=0.0300f;
image->chromaticity.green_primary.x=0.3000f;
image->chromaticity.green_primary.y=0.6000f;
image->chromaticity.green_primary.z=0.1000f;
image->chromaticity.blue_primary.x=0.1500f;
image->chromaticity.blue_primary.y=0.0600f;
image->chromaticity.blue_primary.z=0.7900f;
image->chromaticity.white_point.x=0.3127f;
image->chromaticity.white_point.y=0.3290f;
image->chromaticity.white_point.z=0.3583f;
image->interlace=NoInterlace;
image->ticks_per_second=UndefinedTicksPerSecond;
image->compose=OverCompositeOp;
image->blur=1.0;
InitializeExceptionInfo(&image->exception);
(void) QueryColorDatabase(BackgroundColor,&image->background_color,
&image->exception);
(void) QueryColorDatabase(BorderColor,&image->border_color,&image->exception);
(void) QueryColorDatabase(MatteColor,&image->matte_color,&image->exception);
(void) QueryColorDatabase(TransparentColor,&image->transparent_color,
&image->exception);
GetTimerInfo(&image->timer);
image->ping=MagickFalse;
image->cache=AcquirePixelCache(0);
image->blob=CloneBlobInfo((BlobInfo *) NULL);
image->timestamp=GetMagickTime();
image->debug=IsEventLogging();
image->reference_count=1;
image->semaphore=AllocateSemaphoreInfo();
image->signature=MagickCoreSignature;
if (image_info == (ImageInfo *) NULL)
return(image);
/*
Transfer image info.
*/
SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
MagickFalse);
(void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent);
(void) CopyMagickString(image->magick_filename,image_info->filename,
MaxTextExtent);
(void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent);
if (image_info->size != (char *) NULL)
{
(void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
image->columns=image->extract_info.width;
image->rows=image->extract_info.height;
image->offset=image->extract_info.x;
image->extract_info.x=0;
image->extract_info.y=0;
}
if (image_info->extract != (char *) NULL)
{
RectangleInfo
geometry;
(void) memset(&geometry,0,sizeof(geometry));
flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
{
image->extract_info=geometry;
Swap(image->columns,image->extract_info.width);
Swap(image->rows,image->extract_info.height);
}
}
image->compression=image_info->compression;
image->quality=image_info->quality;
image->endian=image_info->endian;
image->interlace=image_info->interlace;
image->units=image_info->units;
if (image_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
flags=ParseGeometry(image_info->density,&geometry_info);
if ((flags & RhoValue) != 0)
image->x_resolution=geometry_info.rho;
image->y_resolution=image->x_resolution;
if ((flags & SigmaValue) != 0)
image->y_resolution=geometry_info.sigma;
}
if (image_info->page != (char *) NULL)
{
char
*geometry;
image->page=image->extract_info;
geometry=GetPageGeometry(image_info->page);
(void) ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
if (image_info->depth != 0)
image->depth=image_info->depth;
image->dither=image_info->dither;
image->background_color=image_info->background_color;
image->border_color=image_info->border_color;
image->matte_color=image_info->matte_color;
image->transparent_color=image_info->transparent_color;
image->ping=image_info->ping;
image->progress_monitor=image_info->progress_monitor;
image->client_data=image_info->client_data;
if (image_info->cache != (void *) NULL)
ClonePixelCacheMethods(image->cache,image_info->cache);
(void) SyncImageSettings(image_info,image);
option=GetImageOption(image_info,"delay");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;
flags=ParseGeometry(option,&geometry_info);
if ((flags & GreaterValue) != 0)
{
if (image->delay > (size_t) floor(geometry_info.rho+0.5))
image->delay=(size_t) floor(geometry_info.rho+0.5);
}
else
if ((flags & LessValue) != 0)
{
if (image->delay < (size_t) floor(geometry_info.rho+0.5))
image->delay=(size_t) floor(geometry_info.sigma+0.5);
}
else
image->delay=(size_t) floor(geometry_info.rho+0.5);
if ((flags & SigmaValue) != 0)
image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
}
option=GetImageOption(image_info,"dispose");
if (option != (const char *) NULL)
image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
MagickFalse,option);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
ImageInfo
*image_info;
image_info=(ImageInfo *) AcquireMagickMemory(sizeof(*image_info));
if (image_info == (ImageInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
GetImageInfo(image_info);
return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
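% For example, a sketch of growing a sequence one frame at a time
% (illustrative only; a NULL next member indicates a memory shortage):
%
%    AcquireNextImage(image_info, image);
%    if (GetNextImageInList(image) != (Image *) NULL)
%      image = GetNextImageInList(image);
%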
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image)
{
/*
Allocate image structure.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->next=AcquireImage(image_info);
if (GetNextImageInList(image) == (Image *) NULL)
return;
(void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
MaxTextExtent);
if (image_info != (ImageInfo *) NULL)
(void) CopyMagickString(GetNextImageInList(image)->filename,
image_info->filename,MaxTextExtent);
DestroyBlob(GetNextImageInList(image));
image->next->blob=ReferenceBlob(image->blob);
image->next->endian=image->endian;
image->next->scene=image->scene+1;
image->next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting affects how each image is justified within the
% final appended image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
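% For example, a sketch that collapses a loaded sequence into a single
% top-to-bottom image (illustrative only; a NULL return means the append
% failed and exception holds the reason):
%
%    Image *appended = AppendImages(images, MagickTrue, exception);
%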
*/
MagickExport Image *AppendImages(const Image *images,
const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag "Append/Image"
CacheView
*append_view;
Image
*append_image;
MagickBooleanType
homogeneous_colorspace,
matte,
status;
MagickOffsetType
n;
RectangleInfo
geometry;
register const Image
*next;
size_t
depth,
height,
number_images,
width;
ssize_t
x_offset,
y,
y_offset;
/*
Compute maximum area of appended area.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
matte=images->matte;
number_images=1;
width=images->columns;
height=images->rows;
depth=images->depth;
homogeneous_colorspace=MagickTrue;
next=GetNextImageInList(images);
for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->depth > depth)
depth=next->depth;
if (next->colorspace != images->colorspace)
homogeneous_colorspace=MagickFalse;
if (next->matte != MagickFalse)
matte=MagickTrue;
number_images++;
if (stack != MagickFalse)
{
if (next->columns > width)
width=next->columns;
height+=next->rows;
continue;
}
width+=next->columns;
if (next->rows > height)
height=next->rows;
}
/*
Append images.
*/
append_image=CloneImage(images,width,height,MagickTrue,exception);
if (append_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(append_image,DirectClass) == MagickFalse)
{
InheritException(exception,&append_image->exception);
append_image=DestroyImage(append_image);
return((Image *) NULL);
}
if (homogeneous_colorspace == MagickFalse)
(void) SetImageColorspace(append_image,sRGBColorspace);
append_image->depth=depth;
append_image->matte=matte;
append_image->page=images->page;
(void) SetImageBackgroundColor(append_image);
status=MagickTrue;
x_offset=0;
y_offset=0;
next=images;
append_view=AcquireAuthenticCacheView(append_image,exception);
for (n=0; n < (MagickOffsetType) number_images; n++)
{
CacheView
*image_view;
MagickBooleanType
proceed;
SetGeometry(append_image,&geometry);
GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
if (stack != MagickFalse)
x_offset-=geometry.x;
else
y_offset-=geometry.y;
image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(next,next,next->rows,1)
#endif
for (y=0; y < (ssize_t) next->rows; y++)
{
MagickBooleanType
sync;
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict append_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
next->columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
append_indexes=GetCacheViewAuthenticIndexQueue(append_view);
for (x=0; x < (ssize_t) next->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (next->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if ((next->colorspace == CMYKColorspace) &&
(append_image->colorspace == CMYKColorspace))
SetPixelIndex(append_indexes+x,GetPixelIndex(indexes+x));
p++;
q++;
}
sync=SyncCacheViewAuthenticPixels(append_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (stack == MagickFalse)
{
x_offset+=(ssize_t) next->columns;
y_offset=0;
}
else
{
x_offset=0;
y_offset+=(ssize_t) next->rows;
}
proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
if (proceed == MagickFalse)
break;
next=GetNextImageInList(next);
}
append_view=DestroyCacheView(append_view);
if (status == MagickFalse)
append_image=DestroyImage(append_image);
return(append_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() returns UndefinedException if no exceptions are found
% in the image sequence; otherwise it determines the most severe exception,
% reports it as a warning or error depending on the severity, and returns its
% type.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
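% For example (illustrative only; a severity of ErrorException or higher
% indicates the sequence cannot be processed further):
%
%    ExceptionType severity = CatchImageException(image);
%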
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
ExceptionInfo
*exception;
ExceptionType
severity;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
exception=AcquireExceptionInfo();
GetImageException(image,exception);
CatchException(exception);
severity=exception->severity;
exception=DestroyExceptionInfo(exception);
return(severity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based on any clipping path
% information, if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
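% For example, a sketch restricting later operations to the first embedded
% clipping path, equivalent to ClipImage() (illustrative only; a MagickFalse
% return means no matching clip path was defined and the reason is recorded
% in image->exception):
%
%    (void) ClipImagePath(image, "#1", MagickTrue);
%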
*/
MagickExport MagickBooleanType ClipImage(Image *image)
{
return(ClipImagePath(image,"#1",MagickTrue));
}
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
const MagickBooleanType inside)
{
#define ClipImagePathTag "ClipPath/Image"
char
*property;
const char
*value;
Image
*clip_mask;
ImageInfo
*image_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pathname != NULL);
property=AcquireString(pathname);
(void) FormatLocaleString(property,MaxTextExtent,"8BIM:1999,2998:%s",
pathname);
value=GetImageProperty(image,property);
property=DestroyString(property);
if (value == (const char *) NULL)
{
ThrowFileException(&image->exception,OptionError,"NoClipPathDefined",
image->filename);
return(MagickFalse);
}
image_info=AcquireImageInfo();
(void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent);
(void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent);
clip_mask=BlobToImage(image_info,value,strlen(value),&image->exception);
image_info=DestroyImageInfo(image_info);
if (clip_mask == (Image *) NULL)
return(MagickFalse);
if (clip_mask->storage_class == PseudoClass)
{
(void) SyncImage(clip_mask);
if (SetImageStorageClass(clip_mask,DirectClass) == MagickFalse)
return(MagickFalse);
}
if (inside == MagickFalse)
(void) NegateImage(clip_mask,MagickFalse);
(void) FormatLocaleString(clip_mask->magick_filename,MaxTextExtent,
"8BIM:1999,2998:%s\nPS",pathname);
(void) SetImageClipMask(image,clip_mask);
clip_mask=DestroyImage(clip_mask);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows are 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
% const size_t rows,const MagickBooleanType detach,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
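% For example, a sketch of an exact copy detached from the parent I/O stream
% (illustrative only; a NULL return means the clone failed and exception
% holds the reason):
%
%    Image *clone = CloneImage(image, 0, 0, MagickTrue, exception);
%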
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
double
scale;
Image
*clone_image;
size_t
length;
/*
Clone the image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((image->columns == 0) || (image->rows == 0))
{
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
"NegativeOrZeroImageSize","`%s'",image->filename);
return((Image *) NULL);
}
clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image));
(void) memset(clone_image,0,sizeof(*clone_image));
clone_image->signature=MagickCoreSignature;
clone_image->storage_class=image->storage_class;
clone_image->channels=image->channels;
clone_image->colorspace=image->colorspace;
clone_image->matte=image->matte;
clone_image->columns=image->columns;
clone_image->rows=image->rows;
clone_image->dither=image->dither;
(void) CloneImageProfiles(clone_image,image);
(void) CloneImageProperties(clone_image,image);
(void) CloneImageArtifacts(clone_image,image);
GetTimerInfo(&clone_image->timer);
InitializeExceptionInfo(&clone_image->exception);
InheritException(&clone_image->exception,&image->exception);
if (image->ascii85 != (void *) NULL)
Ascii85Initialize(clone_image);
clone_image->extent=image->extent;
clone_image->magick_columns=image->magick_columns;
clone_image->magick_rows=image->magick_rows;
clone_image->type=image->type;
(void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
MaxTextExtent);
(void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent);
(void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent);
clone_image->progress_monitor=image->progress_monitor;
clone_image->client_data=image->client_data;
clone_image->reference_count=1;
clone_image->next=image->next;
clone_image->previous=image->previous;
clone_image->list=NewImageList();
clone_image->clip_mask=NewImageList();
clone_image->mask=NewImageList();
if (detach == MagickFalse)
clone_image->blob=ReferenceBlob(image->blob);
else
{
clone_image->next=NewImageList();
clone_image->previous=NewImageList();
clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
}
clone_image->ping=image->ping;
clone_image->debug=IsEventLogging();
clone_image->semaphore=AllocateSemaphoreInfo();
if (image->colormap != (PixelPacket *) NULL)
{
/*
Allocate and copy the image colormap.
*/
clone_image->colors=image->colors;
length=(size_t) image->colors;
clone_image->colormap=(PixelPacket *) AcquireQuantumMemory(length+1,
sizeof(*clone_image->colormap));
if (clone_image->colormap == (PixelPacket *) NULL)
{
clone_image=DestroyImage(clone_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memcpy(clone_image->colormap,image->colormap,length*
sizeof(*clone_image->colormap));
}
if ((columns == 0) || (rows == 0))
{
if (image->montage != (char *) NULL)
(void) CloneString(&clone_image->montage,image->montage);
if (image->directory != (char *) NULL)
(void) CloneString(&clone_image->directory,image->directory);
if (image->clip_mask != (Image *) NULL)
clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
exception);
if (image->mask != (Image *) NULL)
clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
clone_image->cache=ReferencePixelCache(image->cache);
return(clone_image);
}
if ((columns == image->columns) && (rows == image->rows))
{
if (image->clip_mask != (Image *) NULL)
clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
exception);
if (image->mask != (Image *) NULL)
clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
}
scale=1.0;
if (image->columns != 0)
scale=(double) columns/(double) image->columns;
clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
scale=1.0;
if (image->rows != 0)
scale=(double) rows/(double) image->rows;
clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
clone_image->cache=ClonePixelCache(image->cache);
if (SetImageExtent(clone_image,columns,rows) == MagickFalse)
{
InheritException(exception,&clone_image->exception);
clone_image=DestroyImage(clone_image);
}
return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created and initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
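% For example, a sketch cloning settings so they can be modified without
% disturbing the original (quality is an existing ImageInfo member;
% illustrative only):
%
%    ImageInfo *clone_info = CloneImageInfo(image_info);
%    clone_info->quality = 90;
%    ...
%    clone_info = DestroyImageInfo(clone_info);
%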
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
ImageInfo
*clone_info;
clone_info=AcquireImageInfo();
if (image_info == (ImageInfo *) NULL)
return(clone_info);
clone_info->compression=image_info->compression;
clone_info->temporary=image_info->temporary;
clone_info->adjoin=image_info->adjoin;
clone_info->antialias=image_info->antialias;
clone_info->scene=image_info->scene;
clone_info->number_scenes=image_info->number_scenes;
clone_info->depth=image_info->depth;
if (image_info->size != (char *) NULL)
(void) CloneString(&clone_info->size,image_info->size);
if (image_info->extract != (char *) NULL)
(void) CloneString(&clone_info->extract,image_info->extract);
if (image_info->scenes != (char *) NULL)
(void) CloneString(&clone_info->scenes,image_info->scenes);
if (image_info->page != (char *) NULL)
(void) CloneString(&clone_info->page,image_info->page);
clone_info->interlace=image_info->interlace;
clone_info->endian=image_info->endian;
clone_info->units=image_info->units;
clone_info->quality=image_info->quality;
if (image_info->sampling_factor != (char *) NULL)
(void) CloneString(&clone_info->sampling_factor,
image_info->sampling_factor);
if (image_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,image_info->server_name);
if (image_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,image_info->font);
if (image_info->texture != (char *) NULL)
(void) CloneString(&clone_info->texture,image_info->texture);
if (image_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,image_info->density);
clone_info->pointsize=image_info->pointsize;
clone_info->fuzz=image_info->fuzz;
clone_info->pen=image_info->pen;
clone_info->background_color=image_info->background_color;
clone_info->border_color=image_info->border_color;
clone_info->matte_color=image_info->matte_color;
clone_info->transparent_color=image_info->transparent_color;
clone_info->dither=image_info->dither;
clone_info->monochrome=image_info->monochrome;
clone_info->colors=image_info->colors;
clone_info->colorspace=image_info->colorspace;
clone_info->type=image_info->type;
clone_info->orientation=image_info->orientation;
clone_info->preview_type=image_info->preview_type;
clone_info->group=image_info->group;
clone_info->ping=image_info->ping;
clone_info->verbose=image_info->verbose;
if (image_info->view != (char *) NULL)
(void) CloneString(&clone_info->view,image_info->view);
if (image_info->authenticate != (char *) NULL)
(void) CloneString(&clone_info->authenticate,image_info->authenticate);
(void) CloneImageOptions(clone_info,image_info);
clone_info->progress_monitor=image_info->progress_monitor;
clone_info->client_data=image_info->client_data;
clone_info->cache=image_info->cache;
if (image_info->cache != (void *) NULL)
clone_info->cache=ReferencePixelCache(image_info->cache);
if (image_info->profile != (void *) NULL)
clone_info->profile=(void *) CloneStringInfo((StringInfo *)
image_info->profile);
SetImageInfoFile(clone_info,image_info->file);
SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
clone_info->stream=image_info->stream;
clone_info->virtual_pixel_method=image_info->virtual_pixel_method;
(void) CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent);
(void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent);
(void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent);
(void) CopyMagickString(clone_info->filename,image_info->filename,
MaxTextExtent);
clone_info->subimage=image_info->scene; /* deprecated */
clone_info->subrange=image_info->number_scenes; /* deprecated */
clone_info->channel=image_info->channel;
clone_info->debug=IsEventLogging();
clone_info->signature=image_info->signature;
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o p y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CopyImagePixels() copies pixels from the source image, as defined by the
% geometry, to the destination image at the specified offset.
%
% The format of the CopyImagePixels method is:
%
% MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
% const RectangleInfo *geometry,const OffsetInfo *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the destination image.
%
% o source_image: the source image.
%
% o geometry: define the dimensions of the source pixel rectangle.
%
% o offset: define the offset in the destination image.
%
% o exception: return the highest severity exception.
%
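% For example, a sketch copying a 64x64 tile from the source origin to
% offset (16,16) in the destination (illustrative only):
%
%    RectangleInfo geometry;
%    OffsetInfo offset;
%
%    geometry.width = 64;
%    geometry.height = 64;
%    geometry.x = 0;
%    geometry.y = 0;
%    offset.x = 16;
%    offset.y = 16;
%    (void) CopyImagePixels(image, source_image, &geometry, &offset,
%      exception);
%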
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
const Image *source_image,const RectangleInfo *geometry,
const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(source_image != (Image *) NULL);
assert(geometry != (RectangleInfo *) NULL);
assert(offset != (OffsetInfo *) NULL);
if ((offset->x < 0) || (offset->y < 0) ||
((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
image->filename);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
/*
Copy image pixels.
*/
status=MagickTrue;
progress=0;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,image,geometry->height,1)
#endif
for (y=0; y < (ssize_t) geometry->height; y++)
{
register const IndexPacket
*magick_restrict source_indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
geometry->width,1,exception);
q=GetCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
geometry->width,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
source_indexes=GetCacheViewVirtualIndexQueue(source_view);
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) geometry->width; x++)
{
*q=(*p);
if (image->colorspace == CMYKColorspace)
indexes[x]=source_indexes[x];
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CopyImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
source_view=DestroyCacheView(source_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
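% For example (illustrative only; the call returns NULL once the last
% reference is released, so reassigning avoids a dangling pointer):
%
%    image = DestroyImage(image);
%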
*/
MagickExport Image *DestroyImage(Image *image)
{
MagickBooleanType
destroy;
/*
Dereference image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
destroy=MagickFalse;
LockSemaphoreInfo(image->semaphore);
image->reference_count--;
if (image->reference_count == 0)
destroy=MagickTrue;
UnlockSemaphoreInfo(image->semaphore);
if (destroy == MagickFalse)
return((Image *) NULL);
/*
Destroy image.
*/
DestroyImagePixels(image);
if (image->clip_mask != (Image *) NULL)
image->clip_mask=DestroyImage(image->clip_mask);
if (image->mask != (Image *) NULL)
image->mask=DestroyImage(image->mask);
if (image->montage != (char *) NULL)
image->montage=DestroyString(image->montage);
if (image->directory != (char *) NULL)
image->directory=DestroyString(image->directory);
if (image->colormap != (PixelPacket *) NULL)
image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
if (image->geometry != (char *) NULL)
image->geometry=DestroyString(image->geometry);
DestroyImageProfiles(image);
DestroyImageProperties(image);
DestroyImageArtifacts(image);
if (image->ascii85 != (Ascii85Info*) NULL)
image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
DestroyBlob(image);
(void) ClearExceptionInfo(&image->exception,MagickTrue);
if (image->semaphore != (SemaphoreInfo *) NULL)
DestroySemaphoreInfo(&image->semaphore);
image->signature=(~MagickCoreSignature);
image=(Image *) RelinquishMagickMemory(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
if (image_info->size != (char *) NULL)
image_info->size=DestroyString(image_info->size);
if (image_info->extract != (char *) NULL)
image_info->extract=DestroyString(image_info->extract);
if (image_info->scenes != (char *) NULL)
image_info->scenes=DestroyString(image_info->scenes);
if (image_info->page != (char *) NULL)
image_info->page=DestroyString(image_info->page);
if (image_info->sampling_factor != (char *) NULL)
image_info->sampling_factor=DestroyString(
image_info->sampling_factor);
if (image_info->server_name != (char *) NULL)
image_info->server_name=DestroyString(
image_info->server_name);
if (image_info->font != (char *) NULL)
image_info->font=DestroyString(image_info->font);
if (image_info->texture != (char *) NULL)
image_info->texture=DestroyString(image_info->texture);
if (image_info->density != (char *) NULL)
image_info->density=DestroyString(image_info->density);
if (image_info->view != (char *) NULL)
image_info->view=DestroyString(image_info->view);
if (image_info->authenticate != (char *) NULL)
image_info->authenticate=DestroyString(
image_info->authenticate);
DestroyImageOptions(image_info);
if (image_info->cache != (void *) NULL)
image_info->cache=DestroyPixelCache(image_info->cache);
if (image_info->profile != (StringInfo *) NULL)
image_info->profile=(void *) DestroyStringInfo((StringInfo *)
image_info->profile);
image_info->signature=(~MagickCoreSignature);
image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream. It checks if the
% blob of the specified image is referenced by other images. If the reference
% count is higher than 1, a new blob is assigned to the specified image.
%
% The format of the DisassociateImageStream method is:
%
% void DisassociateImageStream(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
DisassociateBlob(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageClipMask() returns the clip mask associated with the image.
%
% The format of the GetImageClipMask method is:
%
% Image *GetImageClipMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageClipMask(const Image *image,
ExceptionInfo *exception)
{
assert(image != (const Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (image->clip_mask == (Image *) NULL)
return((Image *) NULL);
return(CloneImage(image->clip_mask,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageException() traverses an image sequence and returns any
% error more severe than noted by the exception parameter.
%
% The format of the GetImageException method is:
%
% void GetImageException(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to a list of one or more images.
%
% o exception: return the highest severity exception.
%
*/
MagickExport void GetImageException(Image *image,ExceptionInfo *exception)
{
register Image
*next;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->exception.severity == UndefinedException)
continue;
if (next->exception.severity > exception->severity)
InheritException(exception,&next->exception);
next->exception.severity=UndefinedException;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
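% For example, a sketch initializing a caller-allocated structure, in
% contrast to AcquireImageInfo() which allocates one as well (illustrative
% only):
%
%    ImageInfo image_info;
%
%    GetImageInfo(&image_info);
%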
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
char
*synchronize;
ExceptionInfo
*exception;
/*
File and image dimension members.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image_info != (ImageInfo *) NULL);
(void) memset(image_info,0,sizeof(*image_info));
image_info->adjoin=MagickTrue;
image_info->interlace=NoInterlace;
image_info->channel=DefaultChannels;
image_info->quality=UndefinedCompressionQuality;
image_info->antialias=MagickTrue;
image_info->dither=MagickTrue;
synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (synchronize != (const char *) NULL)
{
image_info->synchronize=IsStringTrue(synchronize);
synchronize=DestroyString(synchronize);
}
exception=AcquireExceptionInfo();
(void) QueryColorDatabase(BackgroundColor,&image_info->background_color,
exception);
(void) QueryColorDatabase(BorderColor,&image_info->border_color,exception);
(void) QueryColorDatabase(MatteColor,&image_info->matte_color,exception);
(void) QueryColorDatabase(TransparentColor,&image_info->transparent_color,
exception);
exception=DestroyExceptionInfo(exception);
image_info->debug=IsEventLogging();
image_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
return(image_info->file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception)
{
assert(image != (const Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (image->mask == (Image *) NULL)
return((Image *) NULL);
return(CloneImage(image->mask,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannels() returns the number of pixel channels associated with the
% specified image.
%
% The format of the GetImageChannels method is:
%
% size_t GetImageChannels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport size_t GetImageChannels(Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
return(image->channels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
% The format of the GetImageReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
ssize_t
reference_count;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
LockSemaphoreInfo(image->semaphore);
reference_count=image->reference_count;
UnlockSemaphoreInfo(image->semaphore);
return(reference_count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
return(GetPixelCacheVirtualMethod(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
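% For example, a sketch expanding the first numeric format identifier
% (illustrative only; after the call, filename holds "frame-042.miff"):
%
%    char filename[MaxTextExtent];
%
%    (void) InterpretImageFilename(image_info, image, "frame-%03d.miff", 42,
%      filename);
%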
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
Image *image,const char *format,int value,char *filename)
{
char
*q;
int
c;
MagickBooleanType
canonical;
register const char
*p;
ssize_t
field_width,
offset;
canonical=MagickFalse;
offset=0;
(void) CopyMagickString(filename,format,MaxTextExtent);
for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
{
q=(char *) p+1;
if (*q == '%')
{
p=q+1;
continue;
}
field_width=0;
if (*q == '0')
field_width=(ssize_t) strtol(q,&q,10);
switch (*q)
{
case 'd':
case 'o':
case 'x':
{
q++;
c=(*q);
*q='\0';
(void) FormatLocaleString(filename+(p-format-offset),(size_t)
(MaxTextExtent-(p-format-offset)),p,value);
offset+=(4-field_width);
*q=c;
(void) ConcatenateMagickString(filename,q,MaxTextExtent);
canonical=MagickTrue;
if (*(q-1) != '%')
break;
p++;
break;
}
case '[':
{
char
pattern[MaxTextExtent];
const char
*value;
register char
*r;
register ssize_t
i;
ssize_t
depth;
/*
Image option.
*/
if (strchr(p,']') == (char *) NULL)
break;
depth=1;
r=q+1;
for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
{
if (*r == '[')
depth++;
if (*r == ']')
depth--;
if (depth <= 0)
break;
pattern[i]=(*r++);
}
pattern[i]='\0';
if (LocaleNCompare(pattern,"filename:",9) != 0)
break;
value=(const char *) NULL;
if (image != (Image *) NULL)
value=GetImageProperty(image,pattern);
if ((value == (const char *) NULL) &&
(image != (Image *) NULL))
value=GetImageArtifact(image,pattern);
if ((value == (const char *) NULL) &&
(image_info != (ImageInfo *) NULL))
value=GetImageOption(image_info,pattern);
if (value == (const char *) NULL)
break;
q--;
c=(*q);
*q='\0';
(void) CopyMagickString(filename+(p-format-offset),value,(size_t)
(MaxTextExtent-(p-format-offset)));
offset+=strlen(pattern)-strlen(value)+3;
*q=c;
(void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
canonical=MagickTrue;
if (*(q-1) != '%')
break;
p++;
break;
}
default:
break;
}
}
for (q=filename; *q != '\0'; q++)
if ((*q == '%') && (*(q+1) == '%'))
{
(void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
canonical=MagickTrue;
}
if (canonical == MagickFalse)
(void) CopyMagickString(filename,format,MaxTextExtent);
return(strlen(filename));
}
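/*
  A minimal usage sketch (the template "frame%03d.png" and the value 17 are
  hypothetical; both the ImageInfo and Image arguments may be NULL when the
  template contains no "%[filename:...]" pattern):

      char
        filename[MaxTextExtent];

      size_t
        length;

      length=InterpretImageFilename((ImageInfo *) NULL,(Image *) NULL,
        "frame%03d.png",17,filename);
      // filename now holds "frame017.png"; length == strlen(filename)
*/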
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
% non-integer or exceeds the bounds of the quantum depth (e.g. for Q16,
% 0..65535).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
(void) image;
(void) exception;
return(MagickFalse);
#else
CacheView
*image_view;
MagickBooleanType
status;
MagickPixelPacket
zero;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
GetMagickPixelPacket(image,&zero);
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickPixelPacket
pixel;
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
if ((pixel.red < 0.0) || (pixel.red > QuantumRange) ||
(pixel.red != (QuantumAny) pixel.red))
break;
if ((pixel.green < 0.0) || (pixel.green > QuantumRange) ||
(pixel.green != (QuantumAny) pixel.green))
break;
if ((pixel.blue < 0.0) || (pixel.blue > QuantumRange) ||
(pixel.blue != (QuantumAny) pixel.blue))
break;
if (pixel.matte != MagickFalse)
{
if ((pixel.opacity < 0.0) || (pixel.opacity > QuantumRange) ||
(pixel.opacity != (QuantumAny) pixel.opacity))
break;
}
if (pixel.colorspace == CMYKColorspace)
{
if ((pixel.index < 0.0) || (pixel.index > QuantumRange) ||
(pixel.index != (QuantumAny) pixel.index))
break;
}
p++;
}
if (x < (ssize_t) image->columns)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
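/*
  Usage sketch ("image" is an assumed valid handle): under a non-HDRI build
  this predicate always returns MagickFalse, so it safely guards HDR-only
  code paths:

      if (IsHighDynamicRangeImage(image,&image->exception) != MagickFalse)
        (void) LogMagickEvent(TraceEvent,GetMagickModule(),
          "%s has out-of-range pixel components",image->filename);
*/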
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
register const Image
*p;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
if (p->signature != MagickCoreSignature)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsTaintImage() returns MagickTrue if any pixel in the image has been altered
% since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
char
magick[MaxTextExtent],
filename[MaxTextExtent];
register const Image
*p;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
(void) CopyMagickString(magick,image->magick,MaxTextExtent);
(void) CopyMagickString(filename,image->filename,MaxTextExtent);
for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
{
if (p->taint != MagickFalse)
return(MagickTrue);
if (LocaleCompare(p->magick,magick) != 0)
return(MagickTrue);
if (LocaleCompare(p->filename,filename) != 0)
return(MagickTrue);
}
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
ExceptionInfo *exception)
{
Image
*clone_image;
assert(image != (Image **) NULL);
assert(*image != (Image *) NULL);
assert((*image)->signature == MagickCoreSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
if (GetImageReferenceCount(*image) <= 1)
return(MagickTrue);
clone_image=CloneImage(*image,0,0,MagickTrue,exception);
LockSemaphoreInfo((*image)->semaphore);
(*image)->reference_count--;
UnlockSemaphoreInfo((*image)->semaphore);
*image=clone_image;
return(MagickTrue);
}
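/*
  Usage sketch of the copy-on-write idiom ("image" is assumed to be a
  possibly shared handle): call ModifyImage() before mutating pixels so
  only this reference is cloned:

      if (ModifyImage(&image,&image->exception) != MagickFalse)
        (void) SetImageOpacity(image,OpaqueOpacity);
*/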
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
% const size_t height,const MagickPixelPacket *background)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
const size_t width,const size_t height,const MagickPixelPacket *background)
{
CacheView
*image_view;
ExceptionInfo
*exception;
Image
*image;
ssize_t
y;
MagickBooleanType
status;
assert(image_info != (const ImageInfo *) NULL);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image_info->signature == MagickCoreSignature);
assert(background != (const MagickPixelPacket *) NULL);
image=AcquireImage(image_info);
image->columns=width;
image->rows=height;
image->colorspace=background->colorspace;
image->matte=background->matte;
image->fuzz=background->fuzz;
image->depth=background->depth;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelPacket(image,background,q,indexes+x);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
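/*
  Usage sketch, assuming image_info came from AcquireImageInfo(): create a
  640x480 canvas filled with 50% gray:

      ExceptionInfo
        *exception;

      Image
        *canvas;

      MagickPixelPacket
        background;

      exception=AcquireExceptionInfo();
      GetMagickPixelPacket((Image *) NULL,&background);
      (void) QueryMagickColor("gray50",&background,exception);
      canvas=NewMagickImage(image_info,640,480,&background);
      exception=DestroyExceptionInfo(exception);
*/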
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
LockSemaphoreInfo(image->semaphore);
image->reference_count++;
UnlockSemaphoreInfo(image->semaphore);
return(image);
}
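/*
  Usage sketch: ReferenceImage() pairs with DestroyImage(); each holder
  destroys its own handle and the pixels are freed with the last reference:

      Image
        *shared;

      shared=ReferenceImage(image);  // reference_count becomes 2
      shared=DestroyImage(shared);   // decrements only
      image=DestroyImage(image);     // last reference frees the image
*/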
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
flags=ParseAbsoluteGeometry(page,&geometry);
if ((flags & WidthValue) != 0)
{
if ((flags & HeightValue) == 0)
geometry.height=geometry.width;
image->page.width=geometry.width;
image->page.height=geometry.height;
}
if ((flags & AspectValue) != 0)
{
if ((flags & XValue) != 0)
image->page.x+=geometry.x;
if ((flags & YValue) != 0)
image->page.y+=geometry.y;
}
else
{
if ((flags & XValue) != 0)
{
image->page.x=geometry.x;
if ((image->page.width == 0) && (geometry.x > 0))
image->page.width=image->columns+geometry.x;
}
if ((flags & YValue) != 0)
{
image->page.y=geometry.y;
if ((image->page.height == 0) && (geometry.y > 0))
image->page.height=image->rows+geometry.y;
}
}
return(MagickTrue);
}
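/*
  Usage sketch, assuming a hypothetical 640x480 canvas: reset the page to
  the canvas size and offset the image origin by (+10,+20):

      (void) ResetImagePage(image,"640x480+10+20");
*/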
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePixels() resets the image pixels, that is, all the pixel
% components are zeroed.
%
% The format of the ResetImagePixels method is:
%
% MagickBooleanType ResetImagePixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResetImagePixels(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
const void
*pixels;
MagickBooleanType
status;
MagickSizeType
length;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
pixels=AcquirePixelCachePixels(image,&length,exception);
if (pixels != (void *) NULL)
{
/*
Reset in-core image pixels.
*/
(void) memset((void *) pixels,0,(size_t) length);
return(MagickTrue);
}
/*
Reset image pixels.
*/
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) memset(q,0,sizeof(PixelPacket));
if ((image->storage_class == PseudoClass) ||
(image->colorspace == CMYKColorspace))
indexes[x]=0;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
% The format of the SetImageBackgroundColor method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image)
{
CacheView
*image_view;
ExceptionInfo
*exception;
IndexPacket
index;
MagickBooleanType
status;
MagickPixelPacket
background;
PixelPacket
pixel;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
if ((IsPixelGray(&image->background_color) == MagickFalse) &&
(IsGrayColorspace(image->colorspace) != MagickFalse))
(void) TransformImageColorspace(image,RGBColorspace);
if ((image->background_color.opacity != OpaqueOpacity) &&
(image->matte == MagickFalse))
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
GetMagickPixelPacket(image,&background);
SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
NULL,&background);
if (image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&background);
index=0;
pixel.opacity=OpaqueOpacity;
SetPixelPacket(image,&background,&pixel,&index);
/*
Set image background color.
*/
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
*q++=pixel;
if (image->colorspace == CMYKColorspace)
{
register IndexPacket
*magick_restrict indexes;
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(indexes+x,index);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
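/*
  Usage sketch: set the background_color member from a color name, then
  paint the whole canvas with it ("image" is an assumed valid handle):

      (void) QueryColorDatabase("white",&image->background_color,
        &image->exception);
      (void) SetImageBackgroundColor(image);
*/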
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannels() sets the number of pixel channels associated with the
% image.
%
% The format of the SetImageChannels method is:
%
% MagickBooleanType SetImageChannels(Image *image,const size_t channels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channels: The number of pixel channels.
%
*/
MagickExport MagickBooleanType SetImageChannels(Image *image,
const size_t channels)
{
image->channels=channels;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() sets the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,
% const MagickPixelPacket *color)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color: the image color.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
const MagickPixelPacket *color)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
assert(color != (const MagickPixelPacket *) NULL);
image->colorspace=color->colorspace;
image->matte=color->matte;
image->fuzz=color->fuzz;
image->depth=color->depth;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelPacket(image,color,q,indexes+x);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
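/*
  Usage sketch: flood the canvas with a named color via a MagickPixelPacket
  (the color name "firebrick" is an arbitrary example):

      MagickPixelPacket
        color;

      GetMagickPixelPacket(image,&color);
      (void) QueryMagickColor("firebrick",&color,&image->exception);
      (void) SetImageColor(image,&color);
*/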
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
const ClassType storage_class)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->storage_class=storage_class;
return(SyncImagePixelCache(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageClipMask() associates a clip path with the image. The clip path
% must be the same dimensions as the image. Set any pixel component of
% the clip path to TransparentOpacity to prevent that corresponding image
% pixel component from being updated when SyncAuthenticPixels() is applied.
%
% The format of the SetImageClipMask method is:
%
% MagickBooleanType SetImageClipMask(Image *image,const Image *clip_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clip_mask: the image clip path.
%
*/
MagickExport MagickBooleanType SetImageClipMask(Image *image,
const Image *clip_mask)
{
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (clip_mask != (const Image *) NULL)
if ((clip_mask->columns != image->columns) ||
(clip_mask->rows != image->rows))
ThrowBinaryImageException(ImageError,"ImageSizeDiffers",image->filename);
if (image->clip_mask != (Image *) NULL)
image->clip_mask=DestroyImage(image->clip_mask);
image->clip_mask=NewImageList();
if (clip_mask == (Image *) NULL)
return(MagickTrue);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
image->clip_mask=CloneImage(clip_mask,0,0,MagickTrue,&image->exception);
if (image->clip_mask == (Image *) NULL)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,const size_t columns,
% const size_t rows)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
const size_t rows)
{
if ((columns == 0) || (rows == 0))
ThrowBinaryImageException(ImageError,"NegativeOrZeroImageSize",
image->filename);
image->columns=columns;
image->rows=rows;
if ((image->depth == 0) || (image->depth > (8*sizeof(MagickSizeType))))
ThrowBinaryImageException(ImageError,"ImageDepthNotSupported",
image->filename);
return(SyncImagePixelCache(image,&image->exception));
}
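/*
  Usage sketch (decoder-style guard): size the pixel cache before writing
  pixels; the call throws into image->exception and returns MagickFalse for
  zero dimensions or an unsupported depth:

      if (SetImageExtent(image,640,480) == MagickFalse)
        return(DestroyImageList(image));
*/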
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the `magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, `ps:image' returns PS indicating a PostScript image.
% JPEG is returned for this filename: `image.jpg'. The filename prefix has
% precedence over the suffix. Use an optional index enclosed in brackets
% after a filename to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A MagickTrue return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
const unsigned int frames,ExceptionInfo *exception)
{
char
extension[MaxTextExtent],
filename[MaxTextExtent],
magic[MaxTextExtent],
*q,
subimage[MaxTextExtent];
const MagicInfo
*magic_info;
const MagickInfo
*magick_info;
ExceptionInfo
*sans_exception;
Image
*image;
MagickBooleanType
status;
register const char
*p;
ssize_t
count;
unsigned char
magick[2*MaxTextExtent];
/*
Look for 'image.format' in filename.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
*subimage='\0';
GetPathComponent(image_info->filename,SubimagePath,subimage);
if (*subimage != '\0')
{
/*
Look for scene specification (e.g. img0001.pcd[4]).
*/
if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse)
{
if (IsGeometry(subimage) != MagickFalse)
(void) CloneString(&image_info->extract,subimage);
}
else
{
size_t
first,
last;
(void) CloneString(&image_info->scenes,subimage);
image_info->scene=StringToUnsignedLong(image_info->scenes);
image_info->number_scenes=image_info->scene;
p=image_info->scenes;
for (q=(char *) image_info->scenes; *q != '\0'; p++)
{
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
p++;
first=(size_t) strtol(p,&q,10);
last=first;
while (isspace((int) ((unsigned char) *q)) != 0)
q++;
if (*q == '-')
last=(size_t) strtol(q+1,&q,10);
if (first > last)
Swap(first,last);
if (first < image_info->scene)
image_info->scene=first;
if (last > image_info->number_scenes)
image_info->number_scenes=last;
p=q;
}
image_info->number_scenes-=image_info->scene-1;
image_info->subimage=image_info->scene;
image_info->subrange=image_info->number_scenes;
}
}
*extension='\0';
if (*image_info->magick == '\0')
GetPathComponent(image_info->filename,ExtensionPath,extension);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if (*extension != '\0')
if ((LocaleCompare(extension,"gz") == 0) ||
(LocaleCompare(extension,"Z") == 0) ||
(LocaleCompare(extension,"svgz") == 0) ||
(LocaleCompare(extension,"wmz") == 0))
{
char
path[MaxTextExtent];
(void) CopyMagickString(path,image_info->filename,MaxTextExtent);
path[strlen(path)-strlen(extension)-1]='\0';
GetPathComponent(path,ExtensionPath,extension);
}
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
if (*extension != '\0')
if (LocaleCompare(extension,"bz2") == 0)
{
char
path[MaxTextExtent];
(void) CopyMagickString(path,image_info->filename,MaxTextExtent);
path[strlen(path)-strlen(extension)-1]='\0';
GetPathComponent(path,ExtensionPath,extension);
}
#endif
image_info->affirm=MagickFalse;
sans_exception=AcquireExceptionInfo();
if ((*extension != '\0') && (IsGlob(extension) == MagickFalse))
{
MagickFormatType
format_type;
register ssize_t
i;
static const char
*format_type_formats[] =
{
"AUTOTRACE",
"BROWSE",
"DCRAW",
"EDIT",
"LAUNCH",
"MPEG:DECODE",
"MPEG:ENCODE",
"PRINT",
"PS:ALPHA",
"PS:CMYK",
"PS:COLOR",
"PS:GRAY",
"PS:MONO",
"SCAN",
"SHOW",
"WIN",
(char *) NULL
};
/*
User specified image format.
*/
(void) CopyMagickString(magic,extension,MaxTextExtent);
LocaleUpper(magic);
/*
Look for explicit image formats.
*/
format_type=UndefinedFormatType;
i=0;
while ((format_type == UndefinedFormatType) &&
(format_type_formats[i] != (char *) NULL))
{
if ((*magic == *format_type_formats[i]) &&
(LocaleCompare(magic,format_type_formats[i]) == 0))
format_type=ExplicitFormatType;
i++;
}
magick_info=GetMagickInfo(magic,sans_exception);
if ((magick_info != (const MagickInfo *) NULL) &&
(magick_info->format_type != UndefinedFormatType))
format_type=magick_info->format_type;
if (format_type == UndefinedFormatType)
(void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
else
if (format_type == ExplicitFormatType)
{
image_info->affirm=MagickTrue;
(void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
}
if (LocaleCompare(magic,"RGB") == 0)
image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */
}
/*
Look for explicit 'format:image' in filename.
*/
*magic='\0';
GetPathComponent(image_info->filename,MagickPath,magic);
if (*magic == '\0')
{
(void) CopyMagickString(magic,image_info->magick,MaxTextExtent);
magick_info=GetMagickInfo(magic,sans_exception);
if (frames == 0)
GetPathComponent(image_info->filename,CanonicalPath,filename);
else
GetPathComponent(image_info->filename,SubcanonicalPath,filename);
(void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
}
else
{
const DelegateInfo
*delegate_info;
/*
User specified image format.
*/
LocaleUpper(magic);
magick_info=GetMagickInfo(magic,sans_exception);
delegate_info=GetDelegateInfo(magic,"*",sans_exception);
if (delegate_info == (const DelegateInfo *) NULL)
delegate_info=GetDelegateInfo("*",magic,sans_exception);
if (((magick_info != (const MagickInfo *) NULL) ||
(delegate_info != (const DelegateInfo *) NULL)) &&
(IsMagickConflict(magic) == MagickFalse))
{
image_info->affirm=MagickTrue;
(void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
GetPathComponent(image_info->filename,CanonicalPath,filename);
(void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
}
}
sans_exception=DestroyExceptionInfo(sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
if ((image_info->adjoin != MagickFalse) && (frames > 1))
{
/*
Test for multiple image support (e.g. image%02d.png).
*/
(void) InterpretImageFilename(image_info,(Image *) NULL,
image_info->filename,(int) image_info->scene,filename);
if ((LocaleCompare(filename,image_info->filename) != 0) &&
(strchr(filename,'%') == (char *) NULL))
image_info->adjoin=MagickFalse;
}
if ((image_info->adjoin != MagickFalse) && (frames > 0))
{
/*
Some image formats do not support multiple frames per file.
*/
magick_info=GetMagickInfo(magic,exception);
if (magick_info != (const MagickInfo *) NULL)
if (GetMagickAdjoin(magick_info) == MagickFalse)
image_info->adjoin=MagickFalse;
}
if (image_info->affirm != MagickFalse)
return(MagickTrue);
if (frames == 0)
{
/*
Determine the image format from the first few bytes of the file.
*/
image=AcquireImage(image_info);
(void) CopyMagickString(image->filename,image_info->filename,
MaxTextExtent);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
if ((IsBlobSeekable(image) == MagickFalse) ||
(IsBlobExempt(image) != MagickFalse))
{
/*
Copy image to a seekable temporary file.
*/
*filename='\0';
status=ImageToFile(image,filename,exception);
(void) CloseBlob(image);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
SetImageInfoFile(image_info,(FILE *) NULL);
(void) CopyMagickString(image->filename,filename,MaxTextExtent);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
(void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
image_info->temporary=MagickTrue;
}
(void) memset(magick,0,sizeof(magick));
count=ReadBlob(image,2*MaxTextExtent,magick);
(void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
(void) CloseBlob(image);
image=DestroyImage(image);
/*
Check magic.xml configuration file.
*/
sans_exception=AcquireExceptionInfo();
magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
if ((magic_info != (const MagicInfo *) NULL) &&
(GetMagicName(magic_info) != (char *) NULL))
{
(void) CopyMagickString(image_info->magick,GetMagicName(magic_info),
MaxTextExtent);
magick_info=GetMagickInfo(image_info->magick,sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
sans_exception=DestroyExceptionInfo(sans_exception);
return(MagickTrue);
}
magick_info=GetMagickInfo(image_info->magick,sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
sans_exception=DestroyExceptionInfo(sans_exception);
}
return(MagickTrue);
}
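/*
  Usage sketch (the filename "rose.jpg" and the ExceptionInfo handle
  "exception" are assumed; frames == 0 lets SetImageInfo() also sniff the
  file's magic bytes when the suffix alone is ambiguous):

      (void) CopyMagickString(image_info->filename,"rose.jpg",MaxTextExtent);
      if (SetImageInfo(image_info,0,exception) != MagickFalse)
        (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
          image_info->magick);  // typically "JPEG"
*/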
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
const size_t length)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
image_info->blob=(void *) blob;
image_info->length=length;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const Image *mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mask: the image mask.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const Image *mask)
{
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (mask != (const Image *) NULL)
if ((mask->columns != image->columns) || (mask->rows != image->rows))
ThrowBinaryImageException(ImageError,"ImageSizeDiffers",image->filename);
if (image->mask != (Image *) NULL)
image->mask=DestroyImage(image->mask);
image->mask=NewImageList();
if (mask == (Image *) NULL)
return(MagickTrue);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
image->mask=CloneImage(mask,0,0,MagickTrue,&image->exception);
if (image->mask == (Image *) NULL)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e O p a c i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageOpacity() sets the opacity levels of the image.
%
% The format of the SetImageOpacity method is:
%
% MagickBooleanType SetImageOpacity(Image *image,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: the level of transparency: 0 is fully opaque and QuantumRange is
% fully transparent.
%
*/
MagickExport MagickBooleanType SetImageOpacity(Image *image,
const Quantum opacity)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
image->matte=MagickTrue;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelOpacity(q,opacity);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
const VirtualPixelMethod virtual_pixel_method)
{
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
return(SetPixelCacheVirtualMethod(image,virtual_pixel_method));
}
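/*
  Usage sketch: mirror out-of-bounds reads at the image edges while a
  border-sensitive filter runs, then restore the previous method:

      VirtualPixelMethod
        previous;

      previous=SetImageVirtualPixelMethod(image,MirrorVirtualPixelMethod);
      // resample or convolve near the borders here
      (void) SetImageVirtualPixelMethod(image,previous);
*/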
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting affects how each image is justified in the
% final image.
%
% The format of the SmushImages method is:
%
% Image *SmushImages(const Image *images,const MagickBooleanType stack,
% const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
CacheView
*left_view,
*right_view;
const Image
*left_image,
*right_image;
RectangleInfo
left_geometry,
right_geometry;
register const PixelPacket
*p;
register ssize_t
i,
y;
size_t
gap;
ssize_t
x;
if (images->previous == (Image *) NULL)
return(0);
right_image=images;
SetGeometry(smush_image,&right_geometry);
GravityAdjustGeometry(right_image->columns,right_image->rows,
right_image->gravity,&right_geometry);
left_image=images->previous;
SetGeometry(smush_image,&left_geometry);
GravityAdjustGeometry(left_image->columns,left_image->rows,
left_image->gravity,&left_geometry);
gap=right_image->columns;
left_view=AcquireVirtualCacheView(left_image,exception);
right_view=AcquireVirtualCacheView(right_image,exception);
for (y=0; y < (ssize_t) smush_image->rows; y++)
{
for (x=(ssize_t) left_image->columns-1; x > 0; x--)
{
p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(GetPixelOpacity(p) != TransparentOpacity) ||
((left_image->columns-x-1) >= gap))
break;
}
i=(ssize_t) left_image->columns-x-1;
for (x=0; x < (ssize_t) right_image->columns; x++)
{
p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
exception);
if ((p == (const PixelPacket *) NULL) ||
(GetPixelOpacity(p) != TransparentOpacity) ||
((x+i) >= (ssize_t) gap))
break;
}
if ((x+i) < (ssize_t) gap)
gap=(size_t) (x+i);
}
right_view=DestroyCacheView(right_view);
left_view=DestroyCacheView(left_view);
if (y < (ssize_t) smush_image->rows)
return(offset);
return((ssize_t) gap-offset);
}
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
CacheView
*bottom_view,
*top_view;
const Image
*bottom_image,
*top_image;
RectangleInfo
bottom_geometry,
top_geometry;
register const PixelPacket
*p;
register ssize_t
i,
x;
size_t
gap;
ssize_t
y;
if (images->previous == (Image *) NULL)
return(0);
bottom_image=images;
SetGeometry(smush_image,&bottom_geometry);
GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
bottom_image->gravity,&bottom_geometry);
top_image=images->previous;
SetGeometry(smush_image,&top_geometry);
GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
&top_geometry);
gap=bottom_image->rows;
top_view=AcquireVirtualCacheView(top_image,exception);
bottom_view=AcquireVirtualCacheView(bottom_image,exception);
for (x=0; x < (ssize_t) smush_image->columns; x++)
{
for (y=(ssize_t) top_image->rows-1; y > 0; y--)
{
p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(GetPixelOpacity(p) != TransparentOpacity) ||
((top_image->rows-y-1) >= gap))
break;
}
i=(ssize_t) top_image->rows-y-1;
for (y=0; y < (ssize_t) bottom_image->rows; y++)
{
p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
exception);
if ((p == (const PixelPacket *) NULL) ||
(GetPixelOpacity(p) != TransparentOpacity) ||
((y+i) >= (ssize_t) gap))
break;
}
if ((y+i) < (ssize_t) gap)
gap=(size_t) (y+i);
}
bottom_view=DestroyCacheView(bottom_view);
top_view=DestroyCacheView(top_view);
if (x < (ssize_t) smush_image->columns)
return(offset);
return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag "Smush/Image"
CacheView
*smush_view;
const Image
*image;
Image
*smush_image;
MagickBooleanType
matte,
proceed,
status;
MagickOffsetType
n;
RectangleInfo
geometry;
register const Image
*next;
size_t
height,
number_images,
width;
ssize_t
x_offset,
y_offset;
/*
Compute the maximum extent of the smushed image.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=images;
matte=image->matte;
number_images=1;
width=image->columns;
height=image->rows;
next=GetNextImageInList(image);
for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->matte != MagickFalse)
matte=MagickTrue;
number_images++;
if (stack != MagickFalse)
{
if (next->columns > width)
width=next->columns;
height+=next->rows;
if (next->previous != (Image *) NULL)
height+=offset;
continue;
}
width+=next->columns;
if (next->previous != (Image *) NULL)
width+=offset;
if (next->rows > height)
height=next->rows;
}
/*
Smush images.
*/
smush_image=CloneImage(image,width,height,MagickTrue,exception);
if (smush_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(smush_image,DirectClass) == MagickFalse)
{
InheritException(exception,&smush_image->exception);
smush_image=DestroyImage(smush_image);
return((Image *) NULL);
}
smush_image->matte=matte;
(void) SetImageBackgroundColor(smush_image);
status=MagickTrue;
x_offset=0;
y_offset=0;
smush_view=AcquireVirtualCacheView(smush_image,exception);
for (n=0; n < (MagickOffsetType) number_images; n++)
{
SetGeometry(smush_image,&geometry);
GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
if (stack != MagickFalse)
{
x_offset-=geometry.x;
y_offset-=SmushYGap(smush_image,image,offset,exception);
}
else
{
x_offset-=SmushXGap(smush_image,image,offset,exception);
y_offset-=geometry.y;
}
status=CompositeImage(smush_image,OverCompositeOp,image,x_offset,y_offset);
proceed=SetImageProgress(image,SmushImageTag,n,number_images);
if (proceed == MagickFalse)
break;
if (stack == MagickFalse)
{
x_offset+=(ssize_t) image->columns;
y_offset=0;
}
else
{
x_offset=0;
y_offset+=(ssize_t) image->rows;
}
image=GetNextImageInList(image);
}
if (stack == MagickFalse)
smush_image->columns=(size_t) x_offset;
else
smush_image->rows=(size_t) y_offset;
smush_view=DestroyCacheView(smush_view);
if (status == MagickFalse)
smush_image=DestroyImage(smush_image);
return(smush_image);
}
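/*
  Usage sketch ("exception" is an assumed ExceptionInfo handle): join an
  image list left-to-right (stack == MagickFalse) with a minimum offset of
  5 pixels between neighbors:

      Image
        *smushed;

      smushed=SmushImages(images,MagickFalse,5,exception);
*/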
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType StripImage(Image *image)
{
MagickBooleanType
status;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
DestroyImageProfiles(image);
(void) DeleteImageProperty(image,"comment");
(void) DeleteImageProperty(image,"date:create");
(void) DeleteImageProperty(image,"date:modify");
status=SetImageArtifact(image,"png:exclude-chunk",
"bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static inline IndexPacket PushColormapIndex(Image *image,
const size_t index,MagickBooleanType *range_exception)
{
if (index < image->colors)
return((IndexPacket) index);
*range_exception=MagickTrue;
return((IndexPacket) 0);
}
MagickExport MagickBooleanType SyncImage(Image *image)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
range_exception,
status,
taint;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (image->ping != MagickFalse)
return(MagickTrue);
if (image->storage_class != PseudoClass)
return(MagickFalse);
assert(image->colormap != (PixelPacket *) NULL);
range_exception=MagickFalse;
status=MagickTrue;
taint=image->taint;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(range_exception,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
IndexPacket
index;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
index=PushColormapIndex(image,(size_t) GetPixelIndex(indexes+x),
&range_exception);
if (image->matte == MagickFalse)
{
SetPixelRgb(q,image->colormap+(ssize_t) index);
}
else
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->taint=taint;
if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
(void) ThrowMagickException(&image->exception,GetMagickModule(),
CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
return(status);
}
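/*
  Usage sketch: after editing colormap entries of a PseudoClass image,
  propagate the new RGB values into the pixel cache:

      image->colormap[0].red=QuantumRange;  // assumes at least one entry
      (void) SyncImage(image);
*/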
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs image_info options into per-image attributes.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image)
% MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
% Image *images)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
Image *images)
{
Image
*image;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
image=images;
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
(void) SyncImageSettings(image_info,image);
(void) DeleteImageOption(image_info,"page");
return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
Image *image)
{
char
property[MaxTextExtent];
const char
*option,
*value;
GeometryInfo
geometry_info;
MagickStatusType
flags;
ResolutionType
units;
/*
Sync image options.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
option=GetImageOption(image_info,"background");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->background_color,
&image->exception);
option=GetImageOption(image_info,"bias");
if (option != (const char *) NULL)
image->bias=StringToDoubleInterval(option,(double) QuantumRange+1.0);
option=GetImageOption(image_info,"black-point-compensation");
if (option != (const char *) NULL)
image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
MagickBooleanOptions,MagickFalse,option);
option=GetImageOption(image_info,"blue-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
}
option=GetImageOption(image_info,"bordercolor");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->border_color,&image->exception);
option=GetImageOption(image_info,"colors");
if (option != (const char *) NULL)
image->colors=StringToUnsignedLong(option);
option=GetImageOption(image_info,"compose");
if (option != (const char *) NULL)
image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,option);
option=GetImageOption(image_info,"compress");
if (option != (const char *) NULL)
image->compression=(CompressionType) ParseCommandOption(
MagickCompressOptions,MagickFalse,option);
option=GetImageOption(image_info,"debug");
if (option != (const char *) NULL)
image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"density");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;
/*
Set image density.
*/
flags=ParseGeometry(option,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
}
option=GetImageOption(image_info,"depth");
if (option != (const char *) NULL)
image->depth=StringToUnsignedLong(option);
option=GetImageOption(image_info,"endian");
if (option != (const char *) NULL)
image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
MagickFalse,option);
option=GetImageOption(image_info,"filter");
if (option != (const char *) NULL)
image->filter=(FilterTypes) ParseCommandOption(MagickFilterOptions,
MagickFalse,option);
option=GetImageOption(image_info,"fuzz");
if (option != (const char *) NULL)
image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
option=GetImageOption(image_info,"gravity");
if (option != (const char *) NULL)
image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(image_info,"green-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
}
option=GetImageOption(image_info,"intensity");
if (option != (const char *) NULL)
image->intensity=(PixelIntensityMethod) ParseCommandOption(
MagickPixelIntensityOptions,MagickFalse,option);
option=GetImageOption(image_info,"intent");
if (option != (const char *) NULL)
image->rendering_intent=(RenderingIntent) ParseCommandOption(
MagickIntentOptions,MagickFalse,option);
option=GetImageOption(image_info,"interlace");
if (option != (const char *) NULL)
image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
MagickFalse,option);
option=GetImageOption(image_info,"interpolate");
if (option != (const char *) NULL)
image->interpolate=(InterpolatePixelMethod) ParseCommandOption(
MagickInterpolateOptions,MagickFalse,option);
option=GetImageOption(image_info,"loop");
if (option != (const char *) NULL)
image->iterations=StringToUnsignedLong(option);
option=GetImageOption(image_info,"mattecolor");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->matte_color,&image->exception);
option=GetImageOption(image_info,"orient");
if (option != (const char *) NULL)
image->orientation=(OrientationType) ParseCommandOption(
MagickOrientationOptions,MagickFalse,option);
option=GetImageOption(image_info,"page");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"quality");
if (option != (const char *) NULL)
image->quality=StringToUnsignedLong(option);
option=GetImageOption(image_info,"red-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
image->chromaticity.red_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
}
if (image_info->quality != UndefinedCompressionQuality)
image->quality=image_info->quality;
option=GetImageOption(image_info,"scene");
if (option != (const char *) NULL)
image->scene=StringToUnsignedLong(option);
option=GetImageOption(image_info,"taint");
if (option != (const char *) NULL)
image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"tile-offset");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"transparent-color");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->transparent_color,
&image->exception);
option=GetImageOption(image_info,"type");
if (option != (const char *) NULL)
image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
option);
option=GetImageOption(image_info,"units");
units=image_info->units;
if (option != (const char *) NULL)
units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
MagickFalse,option);
if (units != UndefinedResolution)
{
if (image->units != units)
switch (image->units)
{
case PixelsPerInchResolution:
{
if (units == PixelsPerCentimeterResolution)
{
image->x_resolution/=2.54;
image->y_resolution/=2.54;
}
break;
}
case PixelsPerCentimeterResolution:
{
if (units == PixelsPerInchResolution)
{
image->x_resolution=(double) ((size_t) (100.0*2.54*
image->x_resolution+0.5))/100.0;
image->y_resolution=(double) ((size_t) (100.0*2.54*
image->y_resolution+0.5))/100.0;
}
break;
}
default:
break;
}
image->units=units;
option=GetImageOption(image_info,"density");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
}
}
option=GetImageOption(image_info,"white-point");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=image->chromaticity.white_point.x;
}
ResetImageOptionIterator(image_info);
for (option=GetNextImageOption(image_info); option != (const char *) NULL; )
{
value=GetImageOption(image_info,option);
if (value != (const char *) NULL)
{
(void) FormatLocaleString(property,MaxTextExtent,"%s",option);
(void) SetImageArtifact(image,property,value);
}
option=GetNextImageOption(image_info);
}
return(MagickTrue);
}
|
ParallelFor.h | /*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include <stdio.h> //printf debugging
#include <algorithm>
// choose threading providers:
#if BT_USE_TBB
#define USE_TBB 1 // use Intel Threading Building Blocks for thread management
#endif
#if BT_USE_PPL
#define USE_PPL 1 // use Microsoft Parallel Patterns Library (installed with Visual Studio 2010 and later)
#endif // BT_USE_PPL
#if BT_USE_OPENMP
#define USE_OPENMP 1 // use OpenMP (also need to change compiler options for OpenMP support)
#endif
#if USE_OPENMP
#include <omp.h>
#endif // #if USE_OPENMP
#if USE_PPL
#include <ppl.h> // if you get a compile error here, check whether your version of Visual Studio includes PPL
// Visual Studio 2010 and later should come with it
#include <concrtrm.h> // for GetProcessorCount()
#endif // #if USE_PPL
#if USE_TBB
#define __TBB_NO_IMPLICIT_LINKAGE 1
#include <tbb/tbb.h>
#include <tbb/task_scheduler_init.h>
#include <tbb/parallel_for.h>
#include <tbb/blocked_range.h>
#endif // #if USE_TBB
class TaskManager
{
public:
enum Api
{
apiNone,
apiOpenMP,
apiTbb,
apiPpl,
apiCount
};
static const char* getApiName( Api api )
{
switch ( api )
{
case apiNone: return "None";
case apiOpenMP: return "OpenMP";
case apiTbb: return "Intel TBB";
case apiPpl: return "MS PPL";
default: return "unknown";
}
}
TaskManager()
{
m_api = apiNone;
m_numThreads = 0;
#if USE_TBB
m_tbbSchedulerInit = NULL;
#endif // #if USE_TBB
}
Api getApi() const
{
return m_api;
}
bool isSupported( Api api ) const
{
#if USE_OPENMP
if ( api == apiOpenMP )
{
return true;
}
#endif
#if USE_TBB
if ( api == apiTbb )
{
return true;
}
#endif
#if USE_PPL
if ( api == apiPpl )
{
return true;
}
#endif
// apiNone is always "supported"
return api == apiNone;
}
void setApi( Api api )
{
if (isSupported(api))
{
m_api = api;
}
else
{
// no compile time support for selected API, fallback to "none"
m_api = apiNone;
}
}
static int getMaxNumThreads()
{
#if USE_OPENMP
return omp_get_max_threads();
#elif USE_PPL
return concurrency::GetProcessorCount();
#elif USE_TBB
return tbb::task_scheduler_init::default_num_threads();
#else
return 1;
#endif
}
int getNumThreads() const
{
return m_numThreads;
}
int setNumThreads( int numThreads )
{
m_numThreads = ( std::max )( 1, numThreads );
#if USE_OPENMP
omp_set_num_threads( m_numThreads );
#endif
#if USE_PPL
{
using namespace concurrency;
if ( CurrentScheduler::Id() != -1 )
{
CurrentScheduler::Detach();
}
SchedulerPolicy policy;
policy.SetConcurrencyLimits( m_numThreads, m_numThreads );
CurrentScheduler::Create( policy );
}
#endif
#if USE_TBB
if ( m_tbbSchedulerInit )
{
delete m_tbbSchedulerInit;
m_tbbSchedulerInit = NULL;
}
m_tbbSchedulerInit = new tbb::task_scheduler_init( m_numThreads );
#endif
return m_numThreads;
}
void init( int numThread = 0 )
{
if (m_numThreads == 0)
{
#if USE_PPL
setApi( apiPpl );
#endif
#if USE_TBB
setApi( apiTbb );
#endif
#if USE_OPENMP
setApi( apiOpenMP );
#endif
if (numThread == 0)
numThread = getMaxNumThreads();
int threadCount = std::min( numThread, getMaxNumThreads() );
setNumThreads( threadCount );
}
else
{
setNumThreads(m_numThreads);
}
}
void shutdown()
{
#if USE_TBB
if ( m_tbbSchedulerInit )
{
delete m_tbbSchedulerInit;
m_tbbSchedulerInit = NULL;
}
#endif
}
private:
Api m_api;
int m_numThreads;
#if USE_TBB
tbb::task_scheduler_init* m_tbbSchedulerInit;
#endif // #if USE_TBB
};
extern TaskManager gTaskMgr;
inline static void initTaskScheduler()
{
gTaskMgr.init();
}
inline static void cleanupTaskScheduler()
{
gTaskMgr.shutdown();
}
#if USE_TBB
///
/// TbbBodyAdapter -- Converts a body object that implements the
/// "forLoop(int iBegin, int iEnd) const" function
/// into a TBB compatible object that takes a tbb::blocked_range<int> type.
///
template <class TBody>
struct TbbBodyAdapter
{
const TBody* mBody;
void operator()( const tbb::blocked_range<int>& range ) const
{
mBody->forLoop( range.begin(), range.end() );
}
};
#endif // #if USE_TBB
#if USE_PPL
///
/// PplBodyAdapter -- Converts a body object that implements the
/// "forLoop(int iBegin, int iEnd) const" function
/// into a PPL compatible object that implements "void operator()( int ) const"
///
template <class TBody>
struct PplBodyAdapter
{
const TBody* mBody;
int mGrainSize;
int mIndexEnd;
void operator()( int i ) const
{
mBody->forLoop( i, (std::min)(i + mGrainSize, mIndexEnd) );
}
};
#endif // #if USE_PPL
///
/// parallelFor -- interface for submitting work expressed as a for loop to the worker threads
///
template <class TBody>
void parallelFor( int iBegin, int iEnd, int grainSize, const TBody& body )
{
#if USE_OPENMP
if ( gTaskMgr.getApi() == TaskManager::apiOpenMP )
{
#pragma omp parallel for schedule(static, 1)
for ( int i = iBegin; i < iEnd; i += grainSize )
{
body.forLoop( i, (std::min)( i + grainSize, iEnd ) );
}
return;
}
#endif // #if USE_OPENMP
#if USE_PPL
if ( gTaskMgr.getApi() == TaskManager::apiPpl )
{
// PPL dispatch
PplBodyAdapter<TBody> pplBody;
pplBody.mBody = &body;
pplBody.mGrainSize = grainSize;
pplBody.mIndexEnd = iEnd;
// note: MSVC 2010 doesn't support partitioner args, so avoid them
concurrency::parallel_for( iBegin,
iEnd,
grainSize,
pplBody
);
return;
}
#endif //#if USE_PPL
#if USE_TBB
if ( gTaskMgr.getApi() == TaskManager::apiTbb )
{
// TBB dispatch
TbbBodyAdapter<TBody> tbbBody;
tbbBody.mBody = &body;
tbb::parallel_for( tbb::blocked_range<int>( iBegin, iEnd, grainSize ),
tbbBody,
tbb::simple_partitioner()
);
return;
}
#endif // #if USE_TBB
{
// run on main thread
body.forLoop( iBegin, iEnd );
}
}
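///
/// Usage sketch (illustrative addition, not part of the original header):
/// a body type only needs a "void forLoop(int iBegin, int iEnd) const"
/// member. ExampleSquareBody and exampleParallelFill below are hypothetical.
///
struct ExampleSquareBody
{
float* mResults;
void forLoop( int iBegin, int iEnd ) const
{
for ( int i = iBegin; i < iEnd; ++i )
{
mResults[ i ] = ( float ) i * ( float ) i; // per-item work
}
}
};
inline void exampleParallelFill( float* results, int count )
{
initTaskScheduler(); // selects the best compiled-in API (OpenMP/TBB/PPL)
ExampleSquareBody body;
body.mResults = results;
parallelFor( 0, count, 64, body ); // grain size: 64 items per work chunk
cleanupTaskScheduler();
}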
|
runner.c | #define _CRT_SECURE_NO_WARNINGS
#include <stdint.h>
void ufbxt_assert_fail(const char *file, uint32_t line, const char *expr);
#define ufbx_assert(cond) do { \
if (!(cond)) ufbxt_assert_fail(__FILE__, __LINE__, "Internal assert: " #cond); \
} while (0)
#define ufbxt_arraycount(arr) (sizeof(arr) / sizeof(*(arr)))
#undef ufbx_assert
#include "../ufbx.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <setjmp.h>
#include <stdarg.h>
#include <math.h>
#if defined(_OPENMP)
#include <omp.h>
#else
static int omp_get_thread_num() { return 0; }
static int omp_get_num_threads() { return 1; }
#endif
// -- Thread local
#ifdef _MSC_VER
#define ufbxt_threadlocal __declspec(thread)
#else
#define ufbxt_threadlocal __thread
#endif
// -- Timing
typedef struct {
uint64_t os_tick;
uint64_t cpu_tick;
} cputime_sync_point;
typedef struct {
cputime_sync_point begin, end;
uint64_t os_freq;
uint64_t cpu_freq;
double rcp_os_freq;
double rcp_cpu_freq;
} cputime_sync_span;
extern const cputime_sync_span *cputime_default_sync;
void cputime_begin_init();
void cputime_end_init();
void cputime_init();
void cputime_begin_sync(cputime_sync_span *span);
void cputime_end_sync(cputime_sync_span *span);
uint64_t cputime_cpu_tick();
uint64_t cputime_os_tick();
double cputime_cpu_delta_to_sec(const cputime_sync_span *span, uint64_t cpu_delta);
double cputime_os_delta_to_sec(const cputime_sync_span *span, uint64_t os_delta);
double cputime_cpu_tick_to_sec(const cputime_sync_span *span, uint64_t cpu_tick);
double cputime_os_tick_to_sec(const cputime_sync_span *span, uint64_t os_tick);
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#define NOMINMAX
#include <Windows.h>
void cputime_sync_now(cputime_sync_point *sync, int accuracy)
{
uint64_t best_delta = UINT64_MAX;
uint64_t os_tick = 0, cpu_tick = 0;
int runs = accuracy ? accuracy : 100;
for (int i = 0; i < runs; i++) {
LARGE_INTEGER begin, end;
QueryPerformanceCounter(&begin);
uint64_t cycle = __rdtsc();
QueryPerformanceCounter(&end);
uint64_t delta = end.QuadPart - begin.QuadPart;
if (delta < best_delta) {
best_delta = delta; // keep the measurement with the tightest QPC bracket
os_tick = (begin.QuadPart + end.QuadPart) / 2;
cpu_tick = cycle;
}
if (delta == 0) break;
}
sync->cpu_tick = cpu_tick;
sync->os_tick = os_tick;
}
uint64_t cputime_cpu_tick()
{
return __rdtsc();
}
uint64_t cputime_os_tick()
{
LARGE_INTEGER res;
QueryPerformanceCounter(&res);
return res.QuadPart;
}
static uint64_t cputime_os_freq()
{
LARGE_INTEGER res;
QueryPerformanceFrequency(&res);
return res.QuadPart;
}
static void cputime_os_wait()
{
Sleep(1);
}
#else
#include <time.h>
// TODO: Other architectures
#include <x86intrin.h>
void cputime_sync_now(cputime_sync_point *sync, int accuracy)
{
uint64_t best_delta = UINT64_MAX;
uint64_t os_tick = 0, cpu_tick = 0;
struct timespec begin, end;
int runs = accuracy ? accuracy : 100;
for (int i = 0; i < runs; i++) {
clock_gettime(CLOCK_REALTIME, &begin);
uint64_t cycle = (uint64_t)__rdtsc();
clock_gettime(CLOCK_REALTIME, &end);
uint64_t begin_ns = (uint64_t)begin.tv_sec*UINT64_C(1000000000) + (uint64_t)begin.tv_nsec;
uint64_t end_ns = (uint64_t)end.tv_sec*UINT64_C(1000000000) + (uint64_t)end.tv_nsec;
uint64_t delta = end_ns - begin_ns;
if (delta < best_delta) {
best_delta = delta; // keep the measurement with the tightest clock bracket
os_tick = (begin_ns + end_ns) / 2;
cpu_tick = cycle;
}
if (delta == 0) break;
}
sync->cpu_tick = cpu_tick;
sync->os_tick = os_tick;
}
uint64_t cputime_cpu_tick()
{
return (uint64_t)__rdtsc();
}
uint64_t cputime_os_tick()
{
struct timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);
return (uint64_t)ts.tv_sec*UINT64_C(1000000000) + (uint64_t)ts.tv_nsec;
}
static uint64_t cputime_os_freq()
{
return UINT64_C(1000000000);
}
static void cputime_os_wait()
{
struct timespec duration;
duration.tv_sec = 0;
duration.tv_nsec = 1000000l; // 1 ms (tv_nsec must be < 1000000000 for nanosleep)
nanosleep(&duration, NULL);
}
#endif
static cputime_sync_span g_cputime_sync;
const cputime_sync_span *cputime_default_sync = &g_cputime_sync;
void cputime_begin_init()
{
cputime_begin_sync(&g_cputime_sync);
}
void cputime_end_init()
{
cputime_end_sync(&g_cputime_sync);
}
void cputime_init()
{
cputime_begin_init();
cputime_end_init();
}
void cputime_begin_sync(cputime_sync_span *span)
{
cputime_sync_now(&span->begin, 0);
}
void cputime_end_sync(cputime_sync_span *span)
{
uint64_t os_freq = cputime_os_freq();
uint64_t min_span = os_freq / 1000;
uint64_t os_tick = cputime_os_tick();
while (os_tick - span->begin.os_tick <= min_span) {
cputime_os_wait();
os_tick = cputime_os_tick();
}
cputime_sync_now(&span->end, 0);
uint64_t len_os = span->end.os_tick - span->begin.os_tick;
uint64_t len_cpu = span->end.cpu_tick - span->begin.cpu_tick;
double cpu_freq = (double)len_cpu / (double)len_os * (double)os_freq;
span->os_freq = os_freq;
span->cpu_freq = (uint64_t)cpu_freq;
span->rcp_os_freq = 1.0 / (double)os_freq;
span->rcp_cpu_freq = 1.0 / cpu_freq;
}
double cputime_cpu_delta_to_sec(const cputime_sync_span *span, uint64_t cpu_delta)
{
if (!span) span = &g_cputime_sync;
return (double)cpu_delta * span->rcp_cpu_freq;
}
double cputime_os_delta_to_sec(const cputime_sync_span *span, uint64_t os_delta)
{
if (!span) span = &g_cputime_sync;
return (double)os_delta * span->rcp_os_freq;
}
double cputime_cpu_tick_to_sec(const cputime_sync_span *span, uint64_t cpu_tick)
{
if (!span) span = &g_cputime_sync;
return (double)(cpu_tick - span->begin.cpu_tick) * span->rcp_cpu_freq;
}
double cputime_os_tick_to_sec(const cputime_sync_span *span, uint64_t os_tick)
{
if (!span) span = &g_cputime_sync;
return (double)(os_tick - span->begin.os_tick) * span->rcp_os_freq;
}
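// Usage sketch (illustrative, not part of the original harness): measure a
// block of work in seconds via CPU ticks. example_measure is a hypothetical name.
static double example_measure(void)
{
cputime_init(); // calibrates cpu_freq against the OS clock (blocks ~1ms)
uint64_t begin = cputime_cpu_tick();
/* ... work to be timed ... */
uint64_t delta = cputime_cpu_tick() - begin;
return cputime_cpu_delta_to_sec(NULL, delta); // NULL selects the default sync span
}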
// -- Vector helpers
static ufbx_real ufbxt_dot2(ufbx_vec2 a, ufbx_vec2 b)
{
return a.x*b.x + a.y*b.y;
}
static ufbx_real ufbxt_dot3(ufbx_vec3 a, ufbx_vec3 b)
{
return a.x*b.x + a.y*b.y + a.z*b.z;
}
static ufbx_vec2 ufbxt_add2(ufbx_vec2 a, ufbx_vec2 b)
{
ufbx_vec2 v;
v.x = a.x + b.x;
v.y = a.y + b.y;
return v;
}
static ufbx_vec3 ufbxt_add3(ufbx_vec3 a, ufbx_vec3 b)
{
ufbx_vec3 v;
v.x = a.x + b.x;
v.y = a.y + b.y;
v.z = a.z + b.z;
return v;
}
static ufbx_vec2 ufbxt_sub2(ufbx_vec2 a, ufbx_vec2 b)
{
ufbx_vec2 v;
v.x = a.x - b.x;
v.y = a.y - b.y;
return v;
}
static ufbx_vec3 ufbxt_sub3(ufbx_vec3 a, ufbx_vec3 b)
{
ufbx_vec3 v;
v.x = a.x - b.x;
v.y = a.y - b.y;
v.z = a.z - b.z;
return v;
}
// -- Test framework
#define ufbxt_memory_context(data) \
ufbxt_make_memory_context(data, (uint32_t)sizeof(data) - 1)
#define ufbxt_memory_context_values(data) \
ufbxt_make_memory_context_values(data, (uint32_t)sizeof(data) - 1)
#define ufbxt_assert(cond) do { \
if (!(cond)) ufbxt_assert_fail(__FILE__, __LINE__, #cond); \
} while (0)
#define ufbxt_assert_eq(a, b, size) do { \
ufbxt_assert_eq_test(a, b, size, __FILE__, __LINE__, \
"ufbxt_assert_eq(" #a ", " #b ", " #size ")"); \
} while (0)
typedef struct {
int failed;
const char *file;
uint32_t line;
const char *expr;
} ufbxt_fail;
typedef struct {
const char *name;
void (*func)(void);
ufbxt_fail fail;
} ufbxt_test;
ufbxt_test *g_current_test;
uint64_t g_bechmark_begin_tick;
ufbx_error g_error;
jmp_buf g_test_jmp;
int g_verbose;
char g_log_buf[16*1024];
uint32_t g_log_pos;
char g_hint[8*1024];
bool g_skip_print_ok = false;
typedef struct {
char *test_name;
uint8_t patch_value;
uint32_t patch_offset;
uint32_t temp_limit;
uint32_t result_limit;
uint32_t truncate_length;
const char *description;
} ufbxt_check_line;
static ufbxt_check_line g_checks[16384];
ufbxt_threadlocal jmp_buf *t_jmp_buf;
void ufbxt_assert_fail(const char *file, uint32_t line, const char *expr)
{
if (t_jmp_buf) {
longjmp(*t_jmp_buf, 1); // use the per-thread fuzz jump target, not the global test buffer
}
printf("FAIL\n");
fflush(stdout);
g_current_test->fail.failed = 1;
g_current_test->fail.file = file;
g_current_test->fail.line = line;
g_current_test->fail.expr = expr;
longjmp(g_test_jmp, 1);
}
void ufbxt_logf(const char *fmt, ...)
{
if (!g_verbose) return;
va_list args;
va_start(args, fmt);
if (g_log_pos < sizeof(g_log_buf)) {
g_log_pos += vsnprintf(g_log_buf + g_log_pos,
sizeof(g_log_buf) - g_log_pos, fmt, args);
if (g_log_pos < sizeof(g_log_buf)) {
g_log_buf[g_log_pos] = '\n';
g_log_pos++;
}
}
va_end(args);
}
void ufbxt_hintf(const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
vsnprintf(g_hint, sizeof(g_hint), fmt, args);
va_end(args);
}
void ufbxt_assert_eq_test(const void *a, const void *b, size_t size, const char *file, uint32_t line, const char *expr)
{
const char *ac = (const char *)a;
const char *bc = (const char *)b;
for (size_t i = 0; i < size; i++) {
if (ac[i] == bc[i]) continue;
ufbxt_logf("Byte offset %u: 0x%02x != 0x%02x\n", (uint32_t)i, (uint8_t)ac[i], (uint8_t)bc[i]);
ufbxt_assert_fail(file, line, expr);
}
}
void ufbxt_log_flush()
{
int prev_newline = 1;
for (uint32_t i = 0; i < g_log_pos; i++) {
if (i >= sizeof(g_log_buf)) break;
char ch = g_log_buf[i];
if (ch == '\n') {
putchar('\n');
prev_newline = 1;
} else {
if (prev_newline) {
putchar(' ');
putchar(' ');
}
prev_newline = 0;
putchar(ch);
}
}
g_log_pos = 0;
}
void ufbxt_log_error(ufbx_error *err)
{
if (!err) return;
for (size_t i = 0; i < err->stack_size; i++) {
ufbx_error_frame *f = &err->stack[i];
ufbxt_logf("Line %u %s: %s", f->source_line, f->function, f->description);
}
}
void ufbxt_bechmark_begin()
{
g_bechmark_begin_tick = cputime_cpu_tick();
}
double ufbxt_bechmark_end()
{
uint64_t end_tick = cputime_cpu_tick();
uint64_t delta = end_tick - g_bechmark_begin_tick;
double sec = cputime_cpu_delta_to_sec(NULL, delta);
double ghz = (double)cputime_default_sync->cpu_freq / 1e9;
ufbxt_logf("%.3fms / %ukcy at %.2fGHz", sec * 1e3, (uint32_t)(delta / 1000), ghz);
return sec;
}
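// Usage sketch (illustrative): bracket the code under test with the pair above.
//   ufbxt_bechmark_begin();
//   /* ... code to measure ... */
//   double sec = ufbxt_bechmark_end(); // also logs ms / kilocycles / GHz when verbose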
char data_root[256];
static void *ufbxt_read_file(const char *name, size_t *p_size)
{
FILE *file = fopen(name, "rb");
if (!file) return NULL;
fseek(file, 0, SEEK_END);
size_t size = ftell(file);
fseek(file, 0, SEEK_SET);
char *data = malloc(size + 1);
ufbxt_assert(data != NULL);
size_t num_read = fread(data, 1, size, file);
fclose(file);
data[size] = '\0';
if (num_read != size) {
ufbxt_assert_fail(__FILE__, __LINE__, "Failed to load file");
}
*p_size = size;
return data;
}
typedef struct {
char name[64];
size_t num_faces;
size_t num_indices;
ufbx_face *faces;
ufbx_vertex_vec3 vertex_position;
ufbx_vertex_vec3 vertex_normal;
ufbx_vertex_vec2 vertex_uv;
} ufbxt_obj_mesh;
typedef struct {
ufbxt_obj_mesh *meshes;
size_t num_meshes;
} ufbxt_obj_file;
static ufbxt_obj_file *ufbxt_load_obj(void *obj_data, size_t obj_size)
{
size_t num_positions = 0;
size_t num_normals = 0;
size_t num_uvs = 0;
size_t num_faces = 0;
size_t num_meshes = 0;
char *line = (char*)obj_data;
for (;;) {
char *end = strpbrk(line, "\r\n");
char prev = '\0';
if (end) {
prev = *end;
*end = '\0';
}
if (!strncmp(line, "v ", 2)) num_positions++;
else if (!strncmp(line, "vt ", 3)) num_uvs++;
else if (!strncmp(line, "vn ", 3)) num_normals++;
else if (!strncmp(line, "f ", 2)) num_faces++;
else if (!strncmp(line, "g default", 7)) { /* ignore default group */ }
else if (!strncmp(line, "g ", 2)) num_meshes++;
if (end) {
*end = prev;
line = end + 1;
} else {
break;
}
}
size_t alloc_size = 0;
alloc_size += sizeof(ufbxt_obj_file);
alloc_size += num_positions * sizeof(ufbx_vec3);
alloc_size += num_normals * sizeof(ufbx_vec3);
alloc_size += num_uvs * sizeof(ufbx_vec2);
alloc_size += num_faces * sizeof(ufbx_face);
alloc_size += num_faces * 3 * 4 * sizeof(int32_t);
alloc_size += num_meshes * sizeof(ufbxt_obj_mesh);
void *data = malloc(alloc_size);
ufbxt_assert(data);
ufbxt_obj_file *obj = (ufbxt_obj_file*)data;
ufbx_vec3 *positions = (ufbx_vec3*)(obj + 1);
ufbx_vec3 *normals = (ufbx_vec3*)(positions + num_positions);
ufbx_vec2 *uvs = (ufbx_vec2*)(normals + num_normals);
ufbx_face *faces = (ufbx_face*)(uvs + num_uvs);
int32_t *position_indices = (int32_t*)(faces + num_faces);
int32_t *normal_indices = (int32_t*)(position_indices + num_faces * 4);
int32_t *uv_indices = (int32_t*)(normal_indices + num_faces * 4);
ufbxt_obj_mesh *meshes = (ufbxt_obj_mesh*)(uv_indices + num_faces * 4);
void *data_end = meshes + num_meshes;
ufbxt_assert((char*)data_end - (char*)data == alloc_size);
ufbx_vec3 *dp = positions;
ufbx_vec3 *dn = normals;
ufbx_vec2 *du = uvs;
ufbxt_obj_mesh *mesh = NULL;
int32_t *dpi = position_indices;
int32_t *dni = normal_indices;
int32_t *dui = uv_indices;
ufbx_face *df = faces;
obj->meshes = meshes;
obj->num_meshes = num_meshes;
line = (char*)obj_data;
for (;;) {
char *line_end = strpbrk(line, "\r\n");
char prev = '\0';
if (line_end) {
prev = *line_end;
*line_end = '\0';
}
if (!strncmp(line, "v ", 2)) {
ufbxt_assert(sscanf(line, "v %lf %lf %lf", &dp->x, &dp->y, &dp->z) == 3);
dp++;
} else if (!strncmp(line, "vt ", 3)) {
ufbxt_assert(sscanf(line, "vt %lf %lf", &du->x, &du->y) == 2);
du++;
} else if (!strncmp(line, "vn ", 3)) {
ufbxt_assert(sscanf(line, "vn %lf %lf %lf", &dn->x, &dn->y, &dn->z) == 3);
dn++;
} else if (!strncmp(line, "f ", 2)) {
ufbxt_assert(mesh);
df->index_begin = (uint32_t)mesh->num_indices;
df->num_indices = 0;
char *begin = line + 2;
do {
char *end = strchr(begin, ' ');
if (end) *end++ = '\0';
int pi = 0, ui = 0, ni = 0;
if (sscanf(begin, "%d/%d/%d", &pi, &ui, &ni) == 3) {
} else if (sscanf(begin, "%d//%d", &pi, &ni) == 2) {
} else {
ufbxt_assert(0 && "Failed to parse face indices");
}
*dpi++ = pi - 1;
*dni++ = ni - 1;
*dui++ = ui - 1;
mesh->num_indices++;
df->num_indices++;
begin = end;
} while (begin);
mesh->num_faces++;
df++;
} else if (!strncmp(line, "g default", 7)) {
/* ignore default group */
} else if (!strncmp(line, "g ", 2)) {
mesh = mesh ? mesh + 1 : meshes;
memset(mesh, 0, sizeof(ufbxt_obj_mesh));
// HACK: Truncate name at '_' to separate Blender
// model and mesh names
size_t len = strcspn(line + 2, "_");
ufbxt_assert(len < sizeof(mesh->name));
memcpy(mesh->name, line + 2, len);
mesh->faces = df;
mesh->vertex_position.data = positions;
mesh->vertex_normal.data = normals;
mesh->vertex_uv.data = uvs;
mesh->vertex_position.indices = dpi;
mesh->vertex_normal.indices = dni;
mesh->vertex_uv.indices = dui;
}
if (line_end) {
*line_end = prev;
line = line_end + 1;
} else {
break;
}
}
return obj;
}
typedef struct {
size_t num;
ufbx_real sum;
ufbx_real max;
} ufbxt_diff_error;
static void ufbxt_assert_close_real(ufbxt_diff_error *p_err, ufbx_real a, ufbx_real b)
{
ufbx_real err = fabs(a - b);
ufbxt_assert(err < 0.001);
p_err->num++;
p_err->sum += err;
if (err > p_err->max) p_err->max = err;
}
static void ufbxt_assert_close_vec2(ufbxt_diff_error *p_err, ufbx_vec2 a, ufbx_vec2 b)
{
ufbxt_assert_close_real(p_err, a.x, b.x);
ufbxt_assert_close_real(p_err, a.y, b.y);
}
static void ufbxt_assert_close_vec3(ufbxt_diff_error *p_err, ufbx_vec3 a, ufbx_vec3 b)
{
ufbxt_assert_close_real(p_err, a.x, b.x);
ufbxt_assert_close_real(p_err, a.y, b.y);
ufbxt_assert_close_real(p_err, a.z, b.z);
}
static void ufbxt_assert_close_vec4(ufbxt_diff_error *p_err, ufbx_vec4 a, ufbx_vec4 b)
{
ufbxt_assert_close_real(p_err, a.x, b.x);
ufbxt_assert_close_real(p_err, a.y, b.y);
ufbxt_assert_close_real(p_err, a.z, b.z);
ufbxt_assert_close_real(p_err, a.w, b.w);
}
static void ufbxt_diff_to_obj(ufbx_scene *scene, ufbxt_obj_file *obj, ufbxt_diff_error *p_err, bool approx_normals)
{
for (size_t mesh_i = 0; mesh_i < obj->num_meshes; mesh_i++) {
ufbxt_obj_mesh *obj_mesh = &obj->meshes[mesh_i];
if (obj_mesh->num_indices == 0) continue;
ufbx_mesh *mesh = ufbx_find_mesh(scene, obj_mesh->name);
ufbxt_assert(mesh);
ufbxt_assert(obj_mesh->num_faces == mesh->num_faces);
ufbxt_assert(obj_mesh->num_indices == mesh->num_indices);
ufbx_matrix *mat = &mesh->node.to_root;
ufbx_matrix norm_mat = ufbx_get_normal_matrix(mat);
// Assume that the indices are in the same order!
for (size_t face_ix = 0; face_ix < mesh->num_faces; face_ix++) {
ufbx_face obj_face = obj_mesh->faces[face_ix];
ufbx_face face = mesh->faces[face_ix];
ufbxt_assert(obj_face.index_begin == face.index_begin);
ufbxt_assert(obj_face.num_indices == face.num_indices);
for (size_t ix = face.index_begin; ix < face.index_begin + face.num_indices; ix++) {
ufbx_vec3 op = ufbx_get_vertex_vec3(&obj_mesh->vertex_position, ix);
ufbx_vec3 fp = ufbx_get_vertex_vec3(&mesh->vertex_position, ix);
ufbx_vec3 on = ufbx_get_vertex_vec3(&obj_mesh->vertex_normal, ix);
ufbx_vec3 fn = ufbx_get_vertex_vec3(&mesh->vertex_normal, ix);
fp = ufbx_transform_position(mat, fp);
fn = ufbx_transform_direction(&norm_mat, fn);
ufbx_real fn_len = (ufbx_real)sqrt(fn.x*fn.x + fn.y*fn.y + fn.z*fn.z);
fn.x /= fn_len;
fn.y /= fn_len;
fn.z /= fn_len;
ufbxt_assert_close_vec3(p_err, op, fp);
if (approx_normals) {
ufbx_real dot = ufbxt_dot3(on, fn);
ufbxt_assert(dot >= 0.9);
} else {
ufbxt_assert_close_vec3(p_err, on, fn);
}
if (mesh->vertex_uv.data) {
ufbxt_assert(obj_mesh->vertex_uv.data);
ufbx_vec2 ou = ufbx_get_vertex_vec2(&obj_mesh->vertex_uv, ix);
ufbx_vec2 fu = ufbx_get_vertex_vec2(&mesh->vertex_uv, ix);
ufbxt_assert_close_vec2(p_err, ou, fu);
}
}
}
}
}
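// Usage sketch (illustrative): the typical flow that ties the helpers above
// together. The function name and file paths are hypothetical; error handling
// and load options are elided.
static void example_diff_against_obj(const char *fbx_path, const char *obj_path)
{
size_t obj_size = 0;
void *obj_data = ufbxt_read_file(obj_path, &obj_size);
ufbxt_assert(obj_data);
ufbxt_obj_file *obj = ufbxt_load_obj(obj_data, obj_size);
size_t fbx_size = 0;
void *fbx_data = ufbxt_read_file(fbx_path, &fbx_size);
ufbxt_assert(fbx_data);
ufbx_scene *scene = ufbx_load_memory(fbx_data, fbx_size, NULL, NULL);
ufbxt_assert(scene);
ufbxt_diff_error err = { 0 };
ufbxt_diff_to_obj(scene, obj, &err, false);
if (err.num > 0) {
ufbxt_logf("Diff: avg %g, max %g", err.sum / (ufbx_real)err.num, err.max);
}
ufbx_free_scene(scene);
free(fbx_data);
free(obj);
free(obj_data);
}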
void ufbxt_check_string(ufbx_string str)
{
// Data may never be NULL, empty strings should have data = ""
ufbxt_assert(str.data != NULL);
ufbxt_assert(strlen(str.data) == str.length);
}
void ufbxt_check_vertex_element(ufbx_scene *scene, ufbx_mesh *mesh, void *void_elem, size_t elem_size)
{
ufbx_vertex_void *elem = (ufbx_vertex_void*)void_elem;
if (elem->data == NULL) {
ufbxt_assert(elem->indices == NULL);
ufbxt_assert(elem->num_elements == 0);
return;
}
ufbxt_assert(elem->num_elements >= 0);
ufbxt_assert(elem->indices != NULL);
// Check that the indices are in range
for (size_t i = 0; i < mesh->num_indices; i++) {
int32_t ix = elem->indices[i];
ufbxt_assert(ix >= -1 && ix < elem->num_elements);
}
// Check that the data at invalid index is valid and zero
char zero[32] = { 0 };
ufbxt_assert(elem_size <= 32);
ufbxt_assert(!memcmp((char*)elem->data - elem_size, zero, elem_size));
}
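// Note (added for clarity, based on the check above): since the element stored
// just before elem->data is guaranteed to be zero, callers can index vertex
// data with a raw (possibly -1) index and get a zero value back without a
// bounds check, e.g. ((ufbx_vec3*)elem->data)[elem->indices[i]] for a vec3.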
void ufbxt_check_props(ufbx_scene *scene, ufbx_props *props, bool top)
{
ufbx_prop *prev = NULL;
for (size_t i = 0; i < props->num_props; i++) {
ufbx_prop *prop = &props->props[i];
ufbxt_assert(prop->type < UFBX_NUM_PROP_TYPES);
ufbxt_check_string(prop->name);
ufbxt_check_string(prop->value_str);
// Properties should be sorted by name and duplicates should be removed
if (prev) {
ufbxt_assert(prop->imp_key >= prev->imp_key);
ufbxt_assert(strcmp(prop->name.data, prev->name.data) > 0);
}
if (top) {
ufbx_prop *ref = ufbx_find_prop(props, prop->name.data);
ufbxt_assert(prop == ref);
}
prev = prop;
}
if (props->defaults) {
ufbxt_check_props(scene, props->defaults, false);
}
}
void ufbxt_check_node(ufbx_scene *scene, ufbx_node *node)
{
ufbxt_check_string(node->name);
ufbxt_check_props(scene, &node->props, true);
if (node->parent) {
bool found = false;
for (size_t i = 0; i < node->parent->children.size; i++) {
if (node->parent->children.data[i] == node) {
found = true;
break;
}
}
ufbxt_assert(found);
}
for (size_t i = 0; i < node->children.size; i++) {
ufbxt_assert(node->children.data[i]->parent == node);
}
}
void ufbxt_check_mesh(ufbx_scene *scene, ufbx_mesh *mesh)
{
ufbx_mesh *found = ufbx_find_mesh(scene, mesh->node.name.data);
ufbxt_assert(found && !strcmp(found->node.name.data, mesh->node.name.data));
ufbxt_check_vertex_element(scene, mesh, &mesh->vertex_position, sizeof(ufbx_vec3));
ufbxt_check_vertex_element(scene, mesh, &mesh->vertex_normal, sizeof(ufbx_vec3));
ufbxt_check_vertex_element(scene, mesh, &mesh->vertex_binormal, sizeof(ufbx_vec3));
ufbxt_check_vertex_element(scene, mesh, &mesh->vertex_tangent, sizeof(ufbx_vec3));
ufbxt_check_vertex_element(scene, mesh, &mesh->vertex_uv, sizeof(ufbx_vec2));
ufbxt_check_vertex_element(scene, mesh, &mesh->vertex_color, sizeof(ufbx_vec4));
ufbxt_assert(mesh->num_vertices == mesh->vertex_position.num_elements);
ufbxt_assert(mesh->num_triangles <= mesh->num_indices);
uint32_t prev_end = 0;
for (size_t i = 0; i < mesh->num_faces + mesh->num_bad_faces; i++) {
ufbx_face face = mesh->faces[i];
if (i == mesh->num_faces) prev_end = 0;
if (mesh->num_bad_faces == 0) {
ufbxt_assert(face.index_begin == prev_end);
} else {
ufbxt_assert(face.index_begin >= prev_end);
}
if (i < mesh->num_faces) {
ufbxt_assert(face.num_indices >= 3);
} else {
ufbxt_assert(face.num_indices > 0 && face.num_indices < 3);
}
prev_end = face.index_begin + face.num_indices;
ufbxt_assert(prev_end <= mesh->num_indices);
for (size_t j = face.index_begin; j < face.index_begin + face.num_indices; j++) {
ufbx_face *p_face = ufbx_find_face(mesh, j);
ufbxt_assert(p_face - mesh->faces == i);
}
if (face.num_indices >= 3) {
size_t num_tris = face.num_indices - 2;
ufbxt_assert(face.num_indices <= 1024);
uint32_t tris[1024];
size_t ix_count[1024];
memset(tris, 0xff, num_tris * 3 * sizeof(uint32_t));
memset(ix_count, 0, face.num_indices * sizeof(size_t)); // ix_count elements are size_t, not uint32_t
ufbxt_assert(ufbx_triangulate(tris, ufbxt_arraycount(tris), mesh, face));
for (size_t i = 0; i < num_tris; i++) {
uint32_t a = tris[i*3 + 0];
uint32_t b = tris[i*3 + 1];
uint32_t c = tris[i*3 + 2];
ufbxt_assert(a != b);
ufbxt_assert(b != c);
ufbxt_assert(a >= face.index_begin && a - face.index_begin < face.num_indices);
ufbxt_assert(b >= face.index_begin && b - face.index_begin < face.num_indices);
ufbxt_assert(c >= face.index_begin && c - face.index_begin < face.num_indices);
ix_count[a - face.index_begin]++;
ix_count[b - face.index_begin]++;
ix_count[c - face.index_begin]++;
}
for (uint32_t i = 0; i < face.num_indices; i++) {
ufbxt_assert(ix_count[i] > 0); // every corner must appear in at least one triangle
}
}
}
for (size_t i = 0; i < mesh->num_edges; i++) {
ufbx_edge edge = mesh->edges[i];
ufbxt_assert(edge.indices[0] < mesh->num_indices);
ufbxt_assert(edge.indices[1] < mesh->num_indices);
}
for (size_t i = 0; i < mesh->uv_sets.size; i++) {
ufbx_uv_set *set = &mesh->uv_sets.data[i];
if (i == 0) {
ufbxt_assert(mesh->vertex_uv.data == set->vertex_uv.data);
ufbxt_assert(mesh->vertex_uv.indices == set->vertex_uv.indices);
ufbxt_assert(mesh->vertex_uv.num_elements == set->vertex_uv.num_elements);
}
ufbxt_check_string(set->name);
ufbxt_check_vertex_element(scene, mesh, &set->vertex_uv, sizeof(ufbx_vec2));
}
for (size_t i = 0; i < mesh->color_sets.size; i++) {
ufbx_color_set *set = &mesh->color_sets.data[i];
if (i == 0) {
ufbxt_assert(mesh->vertex_color.data == set->vertex_color.data);
ufbxt_assert(mesh->vertex_color.indices == set->vertex_color.indices);
ufbxt_assert(mesh->vertex_color.num_elements == set->vertex_color.num_elements);
}
ufbxt_check_string(set->name);
ufbxt_check_vertex_element(scene, mesh, &set->vertex_color, sizeof(ufbx_vec4));
}
for (size_t i = 0; i < mesh->num_edges; i++) {
ufbx_edge edge = mesh->edges[i];
ufbxt_assert(edge.indices[0] < mesh->num_indices);
ufbxt_assert(edge.indices[1] < mesh->num_indices);
ufbx_face *face = ufbx_find_face(mesh, edge.indices[0]);
ufbxt_assert(face);
ufbxt_assert(face == ufbx_find_face(mesh, edge.indices[1]));
}
if (mesh->face_material) {
for (size_t i = 0; i < mesh->num_faces; i++) {
int32_t material = mesh->face_material[i];
ufbxt_assert(material >= 0 && material < mesh->materials.size);
}
}
for (size_t i = 0; i < mesh->skins.size; i++) {
ufbx_skin *skin = &mesh->skins.data[i];
ufbxt_assert(skin->bone);
ufbxt_check_node(scene, skin->bone);
for (size_t j = 0; j < skin->num_weights; j++) {
ufbxt_assert(skin->indices[j] >= -1 && skin->indices[j] < mesh->num_vertices);
}
}
}
void ufbxt_check_material(ufbx_scene *scene, ufbx_material *material)
{
ufbxt_check_string(material->name);
ufbxt_check_props(scene, &material->props, true);
}
void ufbxt_check_anim_stack(ufbx_scene *scene, ufbx_anim_stack *anim_stack)
{
ufbxt_check_string(anim_stack->name);
ufbxt_check_props(scene, &anim_stack->props, true);
for (size_t i = 0; i < anim_stack->layers.size; i++) {
ufbx_anim_layer *layer = anim_stack->layers.data[i];
ptrdiff_t layer_i = layer - scene->anim_layers.data; (void)layer_i; // index within the scene-wide list (currently unused)
ufbxt_assert(layer >= scene->anim_layers.data);
ufbxt_assert(layer < scene->anim_layers.data + scene->anim_layers.size);
}
}
void ufbxt_check_anim_layer(ufbx_scene *scene, ufbx_anim_layer *anim_layer)
{
ufbxt_check_string(anim_layer->name);
ufbxt_check_props(scene, &anim_layer->layer_props, true);
}
void ufbxt_check_anim_prop(ufbx_scene *scene, ufbx_anim_prop *anim_prop)
{
ufbxt_check_string(anim_prop->name);
switch (anim_prop->target)
{
case UFBX_ANIM_UNKNOWN: /* Nop */ break;
case UFBX_ANIM_MODEL: ufbxt_assert(anim_prop->index < scene->models.size); break;
case UFBX_ANIM_MESH: ufbxt_assert(anim_prop->index < scene->meshes.size); break;
case UFBX_ANIM_LIGHT: ufbxt_assert(anim_prop->index < scene->lights.size); break;
case UFBX_ANIM_MATERIAL: ufbxt_assert(anim_prop->index < scene->materials.size); break;
case UFBX_ANIM_BONE: ufbxt_assert(anim_prop->index < scene->bones.size); break;
case UFBX_ANIM_ANIM_LAYER: ufbxt_assert(anim_prop->index < scene->anim_layers.size); break;
default: ufbxt_assert(0 && "Bad anim target"); break;
}
}
void ufbxt_check_scene(ufbx_scene *scene)
{
ufbxt_check_string(scene->metadata.creator);
for (size_t i = 0; i < scene->nodes.size; i++) {
ufbxt_check_node(scene, scene->nodes.data[i]);
}
for (size_t i = 0; i < scene->meshes.size; i++) {
ufbxt_check_mesh(scene, &scene->meshes.data[i]);
}
for (size_t i = 0; i < scene->materials.size; i++) {
ufbxt_check_material(scene, &scene->materials.data[i]);
}
for (size_t i = 0; i < scene->anim_stacks.size; i++) {
ufbxt_check_anim_stack(scene, &scene->anim_stacks.data[i]);
}
for (size_t i = 0; i < scene->anim_layers.size; i++) {
ufbxt_check_anim_layer(scene, &scene->anim_layers.data[i]);
}
for (size_t i = 0; i < scene->anim_props.size; i++) {
ufbxt_check_anim_prop(scene, &scene->anim_props.data[i]);
}
}
static uint32_t g_file_version = 0;
static const char *g_file_type = NULL;
static bool g_fuzz = false;
static bool g_all_byte_values = false;
static bool g_dedicated_allocs = false;
static bool g_no_patch = false;
static int g_patch_start = 0;
static size_t g_fuzz_step = 0;
const char *g_fuzz_test_name = NULL;
static bool ufbxt_begin_fuzz()
{
if (g_fuzz) {
if (!g_skip_print_ok) {
printf("FUZZ\n");
g_skip_print_ok = true;
}
return true;
} else {
return false;
}
}
int ufbxt_test_fuzz(void *data, size_t size, size_t step, int offset, size_t temp_limit, size_t result_limit, size_t truncate_length)
{
if (g_fuzz_step && step != g_fuzz_step) return 1;
t_jmp_buf = (jmp_buf*)calloc(1, sizeof(jmp_buf));
int ret = 1;
if (!setjmp(*t_jmp_buf)) {
ufbx_load_opts opts = { 0 };
opts.max_temp_allocs = temp_limit;
opts.max_result_allocs = result_limit;
if (g_dedicated_allocs) {
opts.temp_huge_size = 1;
opts.result_huge_size = 1;
}
if (truncate_length > 0) size = truncate_length;
ufbx_error error;
ufbx_scene *scene = ufbx_load_memory(data, size, &opts, &error);
if (scene) {
ufbxt_check_scene(scene);
ufbx_free_scene(scene);
ufbxt_assert(temp_limit == 0);
ufbxt_assert(result_limit == 0);
} else {
// Collect hit checks
for (size_t i = 0; i < error.stack_size; i++) {
ufbx_error_frame frame = error.stack[i];
ufbxt_check_line *check = &g_checks[frame.source_line];
if (check->test_name && strcmp(g_fuzz_test_name, check->test_name)) continue;
if ((uint32_t)offset > check->patch_offset - 1) continue;
#pragma omp critical(check)
{
bool ok = (uint32_t)offset <= check->patch_offset - 1;
if (check->test_name && strcmp(g_fuzz_test_name, check->test_name)) ok = false;
if (ok) {
if (!check->test_name) {
size_t name_len = strlen(g_fuzz_test_name) + 1;
check->test_name = (char*)malloc(name_len);
if (check->test_name) {
memcpy(check->test_name, g_fuzz_test_name, name_len);
}
}
if (offset < 0) {
check->patch_offset = UINT32_MAX;
check->patch_value = 0;
} else {
check->patch_offset = offset + 1;
check->patch_value = ((uint8_t*)data)[offset];
}
check->temp_limit = (uint32_t)temp_limit;
check->result_limit = (uint32_t)result_limit;
check->truncate_length = (uint32_t)truncate_length;
check->description = frame.description;
}
}
}
}
} else {
ret = 0;
}
free(t_jmp_buf);
t_jmp_buf = NULL;
return ret;
}
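// Driver sketch (illustrative; example_fuzz_file and the patch value below are
// assumptions, not the harness's actual --fuzz loop). Each byte of the file is
// patched in turn and the loader re-run; allocation limits are probed with
// offset = -1. The "#pragma omp critical" above exists because the real driver
// runs these iterations in parallel.
static void example_fuzz_file(const char *test_name, void *data, size_t size)
{
g_fuzz_test_name = test_name; // consulted when recording hit checks
size_t step = 0;
uint8_t *bytes = (uint8_t*)data;
for (size_t i = 0; i < size; i++) {
uint8_t original = bytes[i];
bytes[i] = (uint8_t)~original; // one of several patch values could be tried
ufbxt_test_fuzz(data, size, ++step, (int)i, 0, 0, 0);
bytes[i] = original;
}
// Fail the Nth temp/result allocation without patching the data.
for (size_t limit = 1; limit <= 64; limit++) {
ufbxt_test_fuzz(data, size, ++step, -1, limit, 0, 0);
ufbxt_test_fuzz(data, size, ++step, -1, 0, limit, 0);
}
}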
typedef struct {
const char *name;
int32_t patch_offset;
uint8_t patch_value;
uint32_t temp_limit;
uint32_t result_limit;
uint32_t truncate_length;
const char *description;
} ufbxt_fuzz_check;
// Generated by running `runner --fuzz`
static const ufbxt_fuzz_check g_fuzz_checks[] = {
{ "blender_279_ball_6100_ascii", 18422, 82, 0, 0, 0, "ufbxi_read_truncated_array(uc, &mesh->face_smoothing, n..." },
{ "blender_279_ball_6100_ascii", 18755, 76, 0, 0, 0, "ufbxi_read_truncated_array(uc, &mesh->face_material, n,..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 0, 17, "header" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 0, 17, "uc->read_fn" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 1, 0, "defs" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 16, 0, "dst" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 16, 0, "name" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 160, 0, "v" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 17, 0, "name" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 256, 0, "v" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 262, 0, "mesh->faces" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 3, 0, "defs->props" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 301, 0, "dst->props" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 302, 0, "ufbxi_merge_properties(uc, &material->props, object->pr..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 324, 0, "ufbxi_push_string_place_str(uc, &layer->name)" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 336, 0, "curve->keyframes.data" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 352, 0, "arr->data" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 352, 0, "ufbxi_retain_array(uc, sizeof(ufbx_model), &uc->scene.m..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 353, 0, "ufbxi_retain_array(uc, sizeof(ufbx_mesh), &uc->scene.me..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 354, 0, "ufbxi_retain_array(uc, sizeof(ufbx_material), &uc->scen..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 355, 0, "ufbxi_retain_array(uc, sizeof(ufbx_light), &uc->scene.l..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 356, 0, "ufbxi_retain_array(uc, sizeof(ufbx_anim_stack), &uc->sc..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 357, 0, "ufbxi_retain_array(uc, sizeof(ufbx_anim_layer), &uc->sc..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 358, 0, "ufbxi_retain_array(uc, sizeof(ufbx_anim_prop), &uc->sce..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 359, 0, "ufbxi_retain_array(uc, sizeof(ufbx_anim_curve), &uc->sc..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 360, 0, "zero_indices && consecutive_indices" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 362, 0, "ufbxi_merge_attribute_properties(uc, &parent.node->prop..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 362, 0, "ufbxi_merge_properties(uc, props, props, attr, &uc->res..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 363, 0, "stack->layers.data" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 364, 0, "layer->props.data" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 364, 0, "mesh->faces" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 365, 0, "mesh->materials.data" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 366, 0, "nodes" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 367, 0, "node->children.data" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 367, 0, "ufbxi_collect_nodes(uc, sizeof(ufbx_model), &nodes, uc-..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 40, 0, "ufbxi_push_string_place_str(uc, name)" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 434, 0, "dst->props" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 435, 0, "ufbxi_merge_properties(uc, &material->props, object->pr..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 475, 0, "ufbxi_push_string_place_str(uc, &layer->name)" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 480, 0, "curve->keyframes.data" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 504, 0, "ufbxi_retain_array(uc, sizeof(ufbx_model), &uc->scene.m..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 505, 0, "ufbxi_retain_array(uc, sizeof(ufbx_mesh), &uc->scene.me..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 506, 0, "ufbxi_retain_array(uc, sizeof(ufbx_material), &uc->scen..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 507, 0, "ufbxi_retain_array(uc, sizeof(ufbx_light), &uc->scene.l..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 508, 0, "ufbxi_retain_array(uc, sizeof(ufbx_anim_stack), &uc->sc..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 509, 0, "ufbxi_retain_array(uc, sizeof(ufbx_anim_layer), &uc->sc..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 510, 0, "ufbxi_retain_array(uc, sizeof(ufbx_anim_prop), &uc->sce..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 512, 0, "arr->data" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 512, 0, "ufbxi_retain_array(uc, sizeof(ufbx_anim_curve), &uc->sc..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 513, 0, "zero_indices && consecutive_indices" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 517, 0, "ufbxi_merge_attribute_properties(uc, &parent.node->prop..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 517, 0, "ufbxi_merge_properties(uc, props, props, attr, &uc->res..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 519, 0, "stack->layers.data" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 520, 0, "layer->props.data" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 522, 0, "mesh->materials.data" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 524, 0, "nodes" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 526, 0, "node->children.data" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 526, 0, "ufbxi_collect_nodes(uc, sizeof(ufbx_model), &nodes, uc-..." },
{ "blender_279_default_6100_ascii", -1, 0, 0, 528, 0, "imp" },
{ "blender_279_default_6100_ascii", -1, 0, 0, 62, 0, "ufbxi_push_string_place_str(uc, name)" },
{ "blender_279_default_6100_ascii", -1, 0, 1, 0, 0, "ufbxi_load_strings(uc)" },
{ "blender_279_default_6100_ascii", -1, 0, 1, 0, 0, "ufbxi_map_grow(&uc->string_map, ufbx_string, 16)" },
{ "blender_279_default_6100_ascii", -1, 0, 1, 0, 0, "ufbxi_push_string_imp(uc, str->data, str->length, false..." },
{ "blender_279_default_6100_ascii", -1, 0, 1093, 0, 0, "light" },
{ "blender_279_default_6100_ascii", -1, 0, 11, 0, 0, "prop->name.data" },
{ "blender_279_default_6100_ascii", -1, 0, 11, 0, 0, "ufbxi_load_default_props(uc)" },
{ "blender_279_default_6100_ascii", -1, 0, 1252, 0, 0, "arr" },
{ "blender_279_default_6100_ascii", -1, 0, 13, 0, 0, "ufbxi_ascii_next_token(uc, &uc->ascii.token)" },
{ "blender_279_default_6100_ascii", -1, 0, 13, 0, 0, "ufbxi_ascii_push_token_char(uc, token, c)" },
{ "blender_279_default_6100_ascii", -1, 0, 13, 0, 0, "ufbxi_begin_parse(uc)" },
{ "blender_279_default_6100_ascii", -1, 0, 1382, 0, 0, "mesh" },
{ "blender_279_default_6100_ascii", -1, 0, 1384, 0, 0, "mesh" },
{ "blender_279_default_6100_ascii", -1, 0, 1386, 0, 0, "conn" },
{ "blender_279_default_6100_ascii", -1, 0, 1386, 0, 0, "ufbxi_add_connection(uc, object->id, geom_obj.id, NULL)" },
{ "blender_279_default_6100_ascii", -1, 0, 14, 0, 0, "model" },
{ "blender_279_default_6100_ascii", -1, 0, 1437, 0, 0, "light" },
{ "blender_279_default_6100_ascii", -1, 0, 1596, 0, 0, "arr" },
{ "blender_279_default_6100_ascii", -1, 0, 16, 0, 0, "node" },
{ "blender_279_default_6100_ascii", -1, 0, 17, 0, 0, "ator->allocs_left > 1" },
{ "blender_279_default_6100_ascii", -1, 0, 1726, 0, 0, "mesh" },
{ "blender_279_default_6100_ascii", -1, 0, 1728, 0, 0, "mesh" },
{ "blender_279_default_6100_ascii", -1, 0, 1730, 0, 0, "conn" },
{ "blender_279_default_6100_ascii", -1, 0, 1730, 0, 0, "ufbxi_add_connection(uc, object->id, geom_obj.id, NULL)" },
{ "blender_279_default_6100_ascii", -1, 0, 18, 0, 0, "ufbxi_grow_array(&uc->ator_tmp, &uc->top_nodes, &uc->to..." },
{ "blender_279_default_6100_ascii", -1, 0, 19, 0, 0, "ufbxi_ascii_push_token_char(uc, token, c)" },
{ "blender_279_default_6100_ascii", -1, 0, 19, 0, 0, "ufbxi_grow_array(&uc->ator_tmp, &token->str_data, &toke..." },
{ "blender_279_default_6100_ascii", -1, 0, 272, 0, 0, "prop" },
{ "blender_279_default_6100_ascii", -1, 0, 32, 0, 0, "ator->allocs_left > 1" },
{ "blender_279_default_6100_ascii", -1, 0, 32, 0, 0, "node->vals" },
{ "blender_279_default_6100_ascii", -1, 0, 329, 0, 0, "props->props" },
{ "blender_279_default_6100_ascii", -1, 0, 331, 0, 0, "dst->props" },
{ "blender_279_default_6100_ascii", -1, 0, 331, 0, 0, "ufbxi_merge_properties(uc, dst, &left, &right, buf)" },
{ "blender_279_default_6100_ascii", -1, 0, 331, 0, 0, "ufbxi_sort_properties(uc, &left, &uc->tmp_sort)" },
{ "blender_279_default_6100_ascii", -1, 0, 331, 0, 0, "ufbxi_sort_properties(uc, props, buf)" },
{ "blender_279_default_6100_ascii", -1, 0, 333, 0, 0, "ufbxi_sort_properties(uc, &right, &uc->tmp_sort)" },
{ "blender_279_default_6100_ascii", -1, 0, 334, 0, 0, "model" },
{ "blender_279_default_6100_ascii", -1, 0, 416, 0, 0, "prop" },
{ "blender_279_default_6100_ascii", -1, 0, 4613, 0, 0, "material" },
{ "blender_279_default_6100_ascii", -1, 0, 4613, 0, 0, "ufbxi_read_material(uc, node, &object)" },
{ "blender_279_default_6100_ascii", -1, 0, 4644, 0, 0, "str" },
{ "blender_279_default_6100_ascii", -1, 0, 4644, 0, 0, "ufbxi_push_string_place_str(uc, &v->s)" },
{ "blender_279_default_6100_ascii", -1, 0, 4678, 0, 0, "node->children" },
{ "blender_279_default_6100_ascii", -1, 0, 4688, 0, 0, "ufbxi_add_connection(uc, parent_id, child_id, prop)" },
{ "blender_279_default_6100_ascii", -1, 0, 4721, 0, 0, "v" },
{ "blender_279_default_6100_ascii", -1, 0, 4722, 0, 0, "v" },
{ "blender_279_default_6100_ascii", -1, 0, 4723, 0, 0, "v" },
{ "blender_279_default_6100_ascii", -1, 0, 4724, 0, 0, "arr_data" },
{ "blender_279_default_6100_ascii", -1, 0, 48, 0, 0, "node->children" },
{ "blender_279_default_6100_ascii", -1, 0, 49, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_MODEL, root..." },
{ "blender_279_default_6100_ascii", -1, 0, 49, 0, 0, "ufbxi_map_grow(&uc->connectable_map, ufbxi_connectable,..." },
{ "blender_279_default_6100_ascii", -1, 0, 4965, 0, 0, "material" },
{ "blender_279_default_6100_ascii", -1, 0, 4965, 0, 0, "ufbxi_read_material(uc, node, &object)" },
{ "blender_279_default_6100_ascii", -1, 0, 4996, 0, 0, "str" },
{ "blender_279_default_6100_ascii", -1, 0, 4996, 0, 0, "ufbxi_push_string_place_str(uc, &v->s)" },
{ "blender_279_default_6100_ascii", -1, 0, 50, 0, 0, "data" },
{ "blender_279_default_6100_ascii", -1, 0, 5042, 0, 0, "node->children" },
{ "blender_279_default_6100_ascii", -1, 0, 5056, 0, 0, "ufbxi_add_connection(uc, parent_id, child_id, prop)" },
{ "blender_279_default_6100_ascii", -1, 0, 5107, 0, 0, "v" },
{ "blender_279_default_6100_ascii", -1, 0, 5108, 0, 0, "v" },
{ "blender_279_default_6100_ascii", -1, 0, 5109, 0, 0, "v" },
{ "blender_279_default_6100_ascii", -1, 0, 5110, 0, 0, "arr_data" },
{ "blender_279_default_6100_ascii", -1, 0, 5232, 0, 0, "stack" },
{ "blender_279_default_6100_ascii", -1, 0, 5234, 0, 0, "layer" },
{ "blender_279_default_6100_ascii", -1, 0, 5236, 0, 0, "ufbxi_add_connection(uc, stack_id, layer_id, NULL)" },
{ "blender_279_default_6100_ascii", -1, 0, 5237, 0, 0, "prop" },
{ "blender_279_default_6100_ascii", -1, 0, 5239, 0, 0, "ufbxi_add_connection(uc, layer_id, id, name)" },
{ "blender_279_default_6100_ascii", -1, 0, 5240, 0, 0, "ufbxi_add_connection(uc, node_id, id, name)" },
{ "blender_279_default_6100_ascii", -1, 0, 5264, 0, 0, "curve" },
{ "blender_279_default_6100_ascii", -1, 0, 5265, 0, 0, "ufbxi_add_connection(uc, parent_id, id, name)" },
{ "blender_279_default_6100_ascii", -1, 0, 5320, 0, 0, "conns" },
{ "blender_279_default_6100_ascii", -1, 0, 5320, 0, 0, "ufbxi_finalize_scene(uc)" },
{ "blender_279_default_6100_ascii", -1, 0, 54, 0, 0, "ator->allocs_left > 1" },
{ "blender_279_default_6100_ascii", -1, 0, 556, 0, 0, "props->props" },
{ "blender_279_default_6100_ascii", -1, 0, 560, 0, 0, "dst->props" },
{ "blender_279_default_6100_ascii", -1, 0, 560, 0, 0, "ufbxi_merge_properties(uc, dst, &left, &right, buf)" },
{ "blender_279_default_6100_ascii", -1, 0, 560, 0, 0, "ufbxi_sort_properties(uc, &left, &uc->tmp_sort)" },
{ "blender_279_default_6100_ascii", -1, 0, 560, 0, 0, "ufbxi_sort_properties(uc, props, buf)" },
{ "blender_279_default_6100_ascii", -1, 0, 561, 0, 0, "ufbxi_sort_properties(uc, &right, &uc->tmp_sort)" },
{ "blender_279_default_6100_ascii", -1, 0, 563, 0, 0, "model" },
{ "blender_279_default_6100_ascii", -1, 0, 5667, 0, 0, "stack" },
{ "blender_279_default_6100_ascii", -1, 0, 5669, 0, 0, "layer" },
{ "blender_279_default_6100_ascii", -1, 0, 5671, 0, 0, "ufbxi_add_connection(uc, stack_id, layer_id, NULL)" },
{ "blender_279_default_6100_ascii", -1, 0, 5680, 0, 0, "curve" },
{ "blender_279_default_6100_ascii", -1, 0, 5681, 0, 0, "ufbxi_add_connection(uc, parent_id, id, name)" },
{ "blender_279_default_6100_ascii", -1, 0, 5691, 0, 0, "prop" },
{ "blender_279_default_6100_ascii", -1, 0, 5693, 0, 0, "ufbxi_add_connection(uc, layer_id, id, name)" },
{ "blender_279_default_6100_ascii", -1, 0, 5696, 0, 0, "ufbxi_add_connection(uc, node_id, id, name)" },
{ "blender_279_default_6100_ascii", -1, 0, 5835, 0, 0, "conns" },
{ "blender_279_default_6100_ascii", -1, 0, 5835, 0, 0, "ufbxi_finalize_scene(uc)" },
{ "blender_279_default_6100_ascii", -1, 0, 64, 0, 0, "node->children" },
{ "blender_279_default_6100_ascii", -1, 0, 64, 0, 0, "node->vals" },
{ "blender_279_default_6100_ascii", -1, 0, 65, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_MODEL, root..." },
{ "blender_279_default_6100_ascii", -1, 0, 65, 0, 0, "ufbxi_map_grow(&uc->connectable_map, ufbxi_connectable,..." },
{ "blender_279_default_6100_ascii", -1, 0, 66, 0, 0, "data" },
{ "blender_279_default_6100_ascii", -1, 0, 70, 0, 0, "ator->allocs_left > 1" },
{ "blender_279_default_6100_ascii", -1, 0, 9, 0, 0, "ufbxi_load_maps(uc)" },
{ "blender_279_default_6100_ascii", -1, 0, 9, 0, 0, "ufbxi_map_grow(&uc->prop_type_map, ufbxi_prop_type_name..." },
{ "blender_279_default_6100_ascii", 0, 255, 0, 0, 0, "ufbxi_ascii_accept(uc, UFBXI_ASCII_NAME)" },
{ "blender_279_default_6100_ascii", 0, 255, 0, 0, 0, "ufbxi_ascii_parse_node(uc, 0, UFBXI_PARSE_ROOT, &end, &..." },
{ "blender_279_default_6100_ascii", 0, 255, 0, 0, 0, "ufbxi_parse_toplevel(uc, ufbxi_FBXHeaderExtension)" },
{ "blender_279_default_6100_ascii", 0, 255, 0, 0, 0, "ufbxi_read_root(uc)" },
{ "blender_279_default_6100_ascii", 1011, 0, 0, 0, 0, "ufbxi_check_string(*type)" },
{ "blender_279_default_6100_ascii", 1011, 0, 0, 0, 0, "ufbxi_split_type_and_name(uc, type_and_name, &type_str,..." },
{ "blender_279_default_6100_ascii", 1106, 0, 0, 0, 0, "ufbxi_get_val2(node, \"SC\", &prop->name, (char**)&type..." },
{ "blender_279_default_6100_ascii", 1106, 0, 0, 0, 0, "ufbxi_read_properties(uc, node, &object.props, &uc->res..." },
{ "blender_279_default_6100_ascii", 1106, 0, 0, 0, 0, "ufbxi_read_property(uc, prop_node, prop, version)" },
{ "blender_279_default_6100_ascii", 1266, 43, 0, 0, 0, "end == token->str_data + token->str_len - 1" },
{ "blender_279_default_6100_ascii", 180, 255, 0, 0, 0, "ufbxi_ascii_parse_node(uc, 0, state, p_end, buf, true)" },
{ "blender_279_default_6100_ascii", 180, 255, 0, 0, 0, "ufbxi_parse_toplevel_child_imp(uc, state, &uc->tmp_pars..." },
{ "blender_279_default_6100_ascii", 180, 255, 0, 0, 0, "ufbxi_parse_toplevel_child(uc, &child)" },
{ "blender_279_default_6100_ascii", 180, 255, 0, 0, 0, "ufbxi_read_header_extension(uc)" },
{ "blender_279_default_6100_ascii", 20623, 79, 0, 0, 0, "ufbxi_read_geometry(uc, node, &geom_obj)" },
{ "blender_279_default_6100_ascii", 20623, 79, 0, 0, 0, "ufbxi_read_model(uc, node, &object)" },
{ "blender_279_default_6100_ascii", 20623, 79, 0, 0, 0, "vertices && indices" },
{ "blender_279_default_6100_ascii", 20654, 56, 0, 0, 0, "ix < mesh->num_vertices" },
{ "blender_279_default_6100_ascii", 20694, 46, 0, 0, 0, "index_data[mesh->num_indices - 1] < 0" },
{ "blender_279_default_6100_ascii", 20782, 76, 0, 0, 0, "ufbxi_find_val1(node, ufbxi_MappingInformationType, \"C..." },
{ "blender_279_default_6100_ascii", 20782, 76, 0, 0, 0, "ufbxi_read_vertex_element(uc, mesh, n, &mesh->vertex_no..." },
{ "blender_279_default_6100_ascii", 20807, 255, 0, 0, 0, "Invalid mapping" },
{ "blender_279_default_6100_ascii", 20867, 77, 0, 0, 0, "data" },
{ "blender_279_default_6100_ascii", 21690, 76, 0, 0, 0, "ufbxi_find_val1(n, ufbxi_MappingInformationType, \"C\",..." },
{ "blender_279_default_6100_ascii", 21774, 76, 0, 0, 0, "arr && arr->size >= 1" },
{ "blender_279_default_6100_ascii", 251, 0, 0, 0, 0, "depth == 0" },
{ "blender_279_default_6100_ascii", 251, 255, 0, 0, 0, "ufbxi_ascii_parse_node(uc, depth + 1, parse_state, &end..." },
{ "blender_279_default_6100_ascii", 382, 0, 0, 0, 0, "c != '\\0'" },
{ "blender_279_default_6100_ascii", 382, 0, 0, 0, 0, "ufbxi_ascii_next_token(uc, &ua->token)" },
{ "blender_279_default_6100_ascii", 415, 255, 0, 0, 0, "end == token->str_data + token->str_len - 1" },
{ "blender_279_default_6100_ascii", 454, 255, 0, 0, 0, "ufbxi_parse_toplevel(uc, ufbxi_Definitions)" },
{ "blender_279_default_6100_ascii", 645, 255, 0, 0, 0, "ufbxi_parse_toplevel_child(uc, &object)" },
{ "blender_279_default_6100_ascii", 645, 255, 0, 0, 0, "ufbxi_read_definitions(uc)" },
{ "blender_279_default_6100_ascii", 73300, 255, 0, 0, 0, "ufbxi_parse_toplevel(uc, ufbxi_Connections)" },
{ "blender_279_default_6100_ascii", 73408, 255, 0, 0, 0, "ufbxi_parse_toplevel_child_imp(uc, state, &uc->tmp, &en..." },
{ "blender_279_default_6100_ascii", 74083, 255, 0, 0, 0, "ufbxi_parse_toplevel_child(uc, &node)" },
{ "blender_279_default_6100_ascii", 74083, 255, 0, 0, 0, "ufbxi_read_connections(uc)" },
{ "blender_279_default_6100_ascii", 74100, 80, 0, 0, 0, "ufbxi_get_val_at(node, 3, 'C', (char**)&prop)" },
{ "blender_279_default_6100_ascii", 74285, 255, 0, 0, 0, "ufbxi_parse_toplevel(uc, ufbxi_Takes)" },
{ "blender_279_default_6100_ascii", 74380, 255, 0, 0, 0, "ufbxi_parse_toplevel_child(uc, &node)" },
{ "blender_279_default_6100_ascii", 74380, 255, 0, 0, 0, "ufbxi_read_takes(uc)" },
{ "blender_279_default_6100_ascii", 74417, 255, 0, 0, 0, "ufbxi_get_val1(node, \"S\", &stack->name)" },
{ "blender_279_default_6100_ascii", 74417, 255, 0, 0, 0, "ufbxi_read_take(uc, node)" },
{ "blender_279_default_6100_ascii", 74431, 255, 0, 0, 0, "ufbxi_find_val2(node, ufbxi_ReferenceTime, \"LL\", &beg..." },
{ "blender_279_default_6100_ascii", 74694, 0, 0, 0, 0, "ufbxi_get_val1(child, \"C\", (char**)&old_name)" },
{ "blender_279_default_6100_ascii", 74694, 0, 0, 0, 0, "ufbxi_read_take_object(uc, child, layer_id)" },
{ "blender_279_default_6100_ascii", 74694, 0, 0, 0, 0, "ufbxi_read_take_prop_channel(uc, child, node_id, layer_..." },
{ "blender_279_default_6100_ascii", 74781, 74, 0, 0, 0, "ufbxi_find_val1(node, ufbxi_KeyCount, \"Z\", &num_keys)" },
{ "blender_279_default_6100_ascii", 74781, 74, 0, 0, 0, "ufbxi_read_take_anim_channel(uc, channel_nodes[i], id, ..." },
{ "blender_279_default_6100_ascii", 74781, 74, 0, 0, 0, "ufbxi_read_take_prop_channel(uc, child, node_id, layer_..." },
{ "blender_279_default_6100_ascii", 74791, 48, 0, 0, 0, "data == data_end" },
{ "blender_279_default_6100_ascii", 74791, 50, 0, 0, 0, "data_end - data >= 2" },
{ "blender_279_default_6100_ascii", 74843, 75, 0, 0, 0, "Unknown key mode" },
{ "blender_279_default_6100_ascii", 893, 255, 0, 0, 0, "ufbxi_parse_toplevel(uc, ufbxi_Objects)" },
{ "blender_279_default_6100_ascii", 997, 255, 0, 0, 0, "ufbxi_parse_toplevel_child(uc, &node)" },
{ "blender_279_default_6100_ascii", 997, 255, 0, 0, 0, "ufbxi_read_objects(uc)" },
{ "blender_279_default_7400_binary", -1, 0, 0, 0, 21089, "data" },
{ "blender_279_default_7400_binary", -1, 0, 0, 0, 21265, "uc->read_fn" },
{ "blender_279_default_7400_binary", -1, 0, 0, 0, 21265, "ufbxi_read_to(uc, decoded_data, encoded_size)" },
{ "blender_279_default_7400_binary", -1, 0, 0, 0, 21297, "uc->read_fn" },
{ "blender_279_default_7400_binary", -1, 0, 0, 0, 21297, "ufbxi_read_to(uc, decoded_data, encoded_size)" },
{ "blender_279_default_7400_binary", -1, 0, 0, 0, 33, "header" },
{ "blender_279_default_7400_binary", -1, 0, 0, 0, 49, "name" },
{ "blender_279_default_7400_binary", -1, 0, 0, 0, 81, "name" },
{ "blender_279_default_7400_binary", -1, 0, 0, 16, 0, "name" },
{ "blender_279_default_7400_binary", -1, 0, 0, 214, 0, "edges" },
{ "blender_279_default_7400_binary", -1, 0, 0, 239, 0, "ufbxi_merge_attribute_properties(uc, &parent.node->prop..." },
{ "blender_279_default_7400_binary", -1, 0, 0, 243, 0, "ufbxi_merge_properties(uc, &node->props, node->props.de..." },
{ "blender_279_default_7400_binary", -1, 0, 0, 32, 0, "name" },
{ "blender_279_default_7400_binary", -1, 0, 0, 387, 0, "edges" },
{ "blender_279_default_7400_binary", -1, 0, 0, 439, 0, "ufbxi_merge_properties(uc, &node->props, node->props.de..." },
{ "blender_279_default_7400_binary", -1, 0, 1004, 0, 0, "ufbxi_grow_array(&uc->ator_tmp, &uc->read_buffer, &uc->..." },
{ "blender_279_default_7400_binary", -1, 0, 1008, 0, 0, "arr" },
{ "blender_279_default_7400_binary", -1, 0, 1009, 0, 0, "arr_data" },
{ "blender_279_default_7400_binary", -1, 0, 1009, 0, 0, "data" },
{ "blender_279_default_7400_binary", -1, 0, 1221, 0, 0, "uc->attributes" },
{ "blender_279_default_7400_binary", -1, 0, 1244, 0, 0, "uc->geometries" },
{ "blender_279_default_7400_binary", -1, 0, 1245, 0, 0, "ufbxi_merge_properties(uc, props, props->defaults, prop..." },
{ "blender_279_default_7400_binary", -1, 0, 1345, 0, 0, "uc->templates" },
{ "blender_279_default_7400_binary", -1, 0, 1385, 0, 0, "attr" },
{ "blender_279_default_7400_binary", -1, 0, 1385, 0, 0, "ufbxi_read_node_attribute(uc, node, &object)" },
{ "blender_279_default_7400_binary", -1, 0, 1492, 0, 0, "ufbxi_grow_array(&uc->ator_tmp, &uc->read_buffer, &uc->..." },
{ "blender_279_default_7400_binary", -1, 0, 1497, 0, 0, "arr_data" },
{ "blender_279_default_7400_binary", -1, 0, 1497, 0, 0, "data" },
{ "blender_279_default_7400_binary", -1, 0, 1522, 0, 0, "arr" },
{ "blender_279_default_7400_binary", -1, 0, 158, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_MODEL, root..." },
{ "blender_279_default_7400_binary", -1, 0, 16, 0, 0, "node" },
{ "blender_279_default_7400_binary", -1, 0, 1715, 0, 0, "uc->attributes" },
{ "blender_279_default_7400_binary", -1, 0, 1745, 0, 0, "uc->geometries" },
{ "blender_279_default_7400_binary", -1, 0, 1746, 0, 0, "ufbxi_merge_properties(uc, props, props->defaults, prop..." },
{ "blender_279_default_7400_binary", -1, 0, 1748, 0, 0, "ufbxi_merge_attribute_properties(uc, &parent.node->prop..." },
{ "blender_279_default_7400_binary", -1, 0, 220, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_MODEL, root..." },
{ "blender_279_default_7400_binary", -1, 0, 32, 0, 0, "node" },
{ "blender_279_default_7400_binary", -1, 0, 32, 0, 0, "vals" },
{ "blender_279_default_7400_binary", -1, 0, 397, 0, 0, "tmpl" },
{ "blender_279_default_7400_binary", -1, 0, 48, 0, 0, "vals" },
{ "blender_279_default_7400_binary", -1, 0, 646, 0, 0, "tmpl" },
{ "blender_279_default_7400_binary", -1, 0, 66, 0, 0, "node->children" },
{ "blender_279_default_7400_binary", -1, 0, 84, 0, 0, "node->children" },
{ "blender_279_default_7400_binary", -1, 0, 859, 0, 0, "uc->templates" },
{ "blender_279_default_7400_binary", -1, 0, 898, 0, 0, "attr" },
{ "blender_279_default_7400_binary", -1, 0, 898, 0, 0, "ufbxi_read_node_attribute(uc, node, &object)" },
{ "blender_279_default_7400_binary", 18187, 0, 0, 0, 0, "ufbxi_check_string(*name)" },
{ "blender_279_default_7400_binary", 18278, 0, 0, 0, 0, "ufbxi_read_properties(uc, node, &object.props, &uc->tmp..." },
{ "blender_279_default_7400_binary", 21028, 255, 0, 0, 0, "ufbxi_read_geometry(uc, node, &object)" },
{ "blender_279_default_7400_binary", 21081, 255, 0, 0, 0, "Bad multivalue array type" },
{ "blender_279_default_7400_binary", 21081, 255, 0, 0, 0, "ufbxi_binary_parse_multivalue_array(uc, dst_type, arr_d..." },
{ "blender_279_default_7400_binary", 21081, 99, 0, 0, 0, "res == (ptrdiff_t)decoded_data_size" },
{ "blender_279_default_7400_binary", 21085, 255, 0, 0, 0, "size <= uc->opts.max_array_size" },
{ "blender_279_default_7400_binary", 21086, 0, 0, 0, 0, "encoded_size == decoded_data_size" },
{ "blender_279_default_7400_binary", 21086, 255, 0, 0, 0, "Bad array encoding" },
{ "blender_279_default_7400_binary", 21209, 255, 0, 0, 0, "Bad multivalue array type" },
{ "blender_279_default_7400_binary", 21349, 255, 0, 0, 0, "index_ix >= 0 && (size_t)index_ix < mesh->num_indices" },
{ "blender_279_default_7400_binary", 24, 255, 0, 0, 0, "num_values64 <= (uint64_t)uc->opts.max_node_values" },
{ "blender_279_default_7400_binary", 24, 255, 0, 0, 0, "ufbxi_binary_parse_node(uc, 0, UFBXI_PARSE_ROOT, &end, ..." },
{ "blender_279_default_7400_binary", 25664, 255, 0, 0, 0, "data" },
{ "blender_279_default_7400_binary", 31, 255, 0, 0, 0, "Bad values type" },
{ "blender_279_default_7400_binary", 3210, 1, 0, 0, 0, "ufbxi_parse_toplevel_child(uc, &child)" },
{ "blender_279_default_7400_binary", 3210, 1, 0, 0, 0, "ufbxi_read_document(uc)" },
{ "blender_279_default_7400_binary", 331, 0, 0, 0, 0, "str || length == 0" },
{ "blender_279_default_7400_binary", 331, 0, 0, 0, 0, "ufbxi_push_string_place_str(uc, &vals[i].s)" },
{ "blender_279_default_7400_binary", 35, 255, 0, 0, 0, "ufbxi_binary_parse_node(uc, 0, state, p_end, buf, true)" },
{ "blender_279_default_7400_binary", 355, 255, 0, 0, 0, "ufbxi_skip_bytes(uc, encoded_size)" },
{ "blender_279_default_7400_binary", 36, 255, 0, 0, 0, "ufbxi_read_bytes(uc, (size_t)to_skip)" },
{ "blender_279_default_7400_binary", 36, 255, 0, 0, 0, "ufbxi_skip_bytes(uc, values_end_offset - offset)" },
{ "blender_279_default_7400_binary", 3698, 0, 0, 0, 0, "ufbxi_get_val1(object, \"C\", (char**)&tmpl->type)" },
{ "blender_279_default_7400_binary", 3762, 0, 0, 0, 0, "ufbxi_get_val1(props, \"S\", &tmpl->sub_type)" },
{ "blender_279_default_7400_binary", 3830, 0, 0, 0, 0, "ufbxi_read_properties(uc, props, &tmpl->props, &uc->tmp..." },
{ "blender_279_default_7400_binary", 3868, 0, 0, 0, 0, "ufbxi_get_val_at(node, val_ix++, 'C', (char**)&subtype_..." },
{ "blender_279_default_7400_binary", 58, 0, 0, 0, 0, "ufbxi_parse_toplevel(uc, ufbxi_Documents)" },
{ "blender_279_default_7400_binary", 58, 255, 0, 0, 0, "current_offset == end_offset" },
{ "blender_279_default_7400_binary", 66, 0, 0, 0, 0, "offset <= values_end_offset" },
{ "blender_279_default_7400_binary", 70, 0, 0, 0, 0, "ufbxi_binary_parse_node(uc, depth + 1, parse_state, &en..." },
{ "blender_279_sausage_6100_ascii", -1, 0, 0, 7147, 0, "ufbxi_retain_array(uc, sizeof(ufbx_bone), &uc->scene.bo..." },
{ "blender_279_sausage_6100_ascii", -1, 0, 0, 7168, 0, "ufbxi_collect_nodes(uc, sizeof(ufbx_bone), &nodes, uc->..." },
{ "blender_279_sausage_6100_ascii", -1, 0, 0, 7169, 0, "skins" },
{ "blender_279_sausage_6100_ascii", -1, 0, 0, 9620, 0, "ufbxi_retain_array(uc, sizeof(ufbx_bone), &uc->scene.bo..." },
{ "blender_279_sausage_6100_ascii", -1, 0, 0, 9656, 0, "ufbxi_collect_nodes(uc, sizeof(ufbx_bone), &nodes, uc->..." },
{ "blender_279_sausage_6100_ascii", -1, 0, 0, 9661, 0, "skins" },
{ "blender_279_sausage_6100_ascii", -1, 0, 10006, 0, 0, "parent.skin_deformer->skin_index" },
{ "blender_279_sausage_6100_ascii", -1, 0, 4824, 0, 0, "deformer" },
{ "blender_279_sausage_6100_ascii", -1, 0, 4824, 0, 0, "ufbxi_read_deformer(uc, node, &object)" },
{ "blender_279_sausage_6100_ascii", -1, 0, 4888, 0, 0, "skin" },
{ "blender_279_sausage_6100_ascii", -1, 0, 5117, 0, 0, "uc->skin_deformers" },
{ "blender_279_sausage_6100_ascii", -1, 0, 5191, 0, 0, "deformer" },
{ "blender_279_sausage_6100_ascii", -1, 0, 5191, 0, 0, "ufbxi_read_deformer(uc, node, &object)" },
{ "blender_279_sausage_6100_ascii", -1, 0, 5257, 0, 0, "skin" },
{ "blender_279_sausage_6100_ascii", -1, 0, 5511, 0, 0, "uc->skin_deformers" },
{ "blender_279_sausage_6100_ascii", -1, 0, 574, 0, 0, "bone" },
{ "blender_279_sausage_6100_ascii", -1, 0, 6971, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_ANIM_CURVE,..." },
{ "blender_279_sausage_6100_ascii", -1, 0, 7653, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_ANIM_CURVE,..." },
{ "blender_279_sausage_6100_ascii", -1, 0, 816, 0, 0, "bone" },
{ "blender_279_sausage_6100_ascii", -1, 0, 9006, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_ANIM_PROP, ..." },
{ "blender_279_sausage_6100_ascii", -1, 0, 9043, 0, 0, "uc->skin_clusters" },
{ "blender_279_sausage_6100_ascii", -1, 0, 9927, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_ANIM_PROP, ..." },
{ "blender_279_sausage_6100_ascii", -1, 0, 9999, 0, 0, "uc->skin_clusters" },
{ "blender_279_sausage_6100_ascii", 11936, 85, 0, 0, 0, "mesh->num_vertices > 0" },
{ "blender_279_sausage_7400_binary", -1, 0, 2775, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_ANIM_CURVE,..." },
{ "blender_279_sausage_7400_binary", -1, 0, 3232, 0, 0, "ufbxi_add_connectable(uc, UFBXI_CONNECTABLE_ANIM_CURVE,..." },
{ "blender_282_suzanne_7400_binary", -1, 0, 0, 160, 0, "mesh->uv_sets.data" },
{ "blender_282_suzanne_7400_binary", -1, 0, 0, 97, 0, "mesh->uv_sets.data" },
{ "blender_282_suzanne_7400_binary", 22907, 255, 0, 0, 0, "ufbxi_read_vertex_element(uc, mesh, n, &set->vertex_uv...." },
{ "blender_282_suzanne_7400_binary", 22994, 0, 0, 0, 0, "Invalid mapping" },
{ "maya_anim_light_7500_binary", -1, 0, 1238, 0, 0, "ufbxi_merge_properties(uc, props, attr->defaults, props..." },
{ "maya_anim_light_7500_binary", -1, 0, 907, 0, 0, "ufbxi_merge_properties(uc, props, attr->defaults, props..." },
{ "maya_auto_clamp_7100_ascii", -1, 0, 1136, 0, 0, "v" },
{ "maya_auto_clamp_7100_ascii", -1, 0, 1137, 0, 0, "v" },
{ "maya_auto_clamp_7100_ascii", -1, 0, 816, 0, 0, "v" },
{ "maya_auto_clamp_7100_ascii", -1, 0, 817, 0, 0, "v" },
{ "maya_color_sets_6100_binary", -1, 0, 0, 126, 0, "mesh->color_sets.data" },
{ "maya_color_sets_6100_binary", -1, 0, 0, 78, 0, "mesh->color_sets.data" },
{ "maya_color_sets_6100_binary", 9909, 255, 0, 0, 0, "ufbxi_read_vertex_element(uc, mesh, n, &set->vertex_col..." },
{ "maya_cone_6100_binary", 15524, 255, 0, 0, 0, "ufbxi_find_val1(n, ufbxi_MappingInformationType, \"C\",..." },
{ "maya_cone_6100_binary", 15571, 255, 0, 0, 0, "ufbxi_read_truncated_array(uc, &mesh->edge_crease, n, u..." },
{ "maya_cone_6100_binary", 16031, 255, 0, 0, 0, "ufbxi_read_vertex_element(uc, mesh, n, &mesh->vertex_cr..." },
{ "maya_cube_6100_ascii", -1, 0, 0, 372, 0, "v" },
{ "maya_cube_6100_ascii", -1, 0, 0, 560, 0, "v" },
{ "maya_cube_6100_binary", -1, 0, 0, 0, 10701, "val" },
{ "maya_cube_6100_binary", -1, 0, 0, 0, 10705, "val" },
{ "maya_cube_6100_binary", -1, 0, 273, 0, 0, "arr_data" },
{ "maya_cube_6100_binary", -1, 0, 442, 0, 0, "arr_data" },
{ "maya_cube_6100_binary", -1, 0, 488, 0, 0, "binormals" },
{ "maya_cube_6100_binary", -1, 0, 489, 0, 0, "tangents" },
{ "maya_cube_6100_binary", -1, 0, 802, 0, 0, "binormals" },
{ "maya_cube_6100_binary", -1, 0, 803, 0, 0, "tangents" },
{ "maya_cube_6100_binary", 10525, 255, 0, 0, 0, "ufbxi_find_val1(n, ufbxi_MappingInformationType, \"C\",..." },
{ "maya_cube_6100_binary", 10572, 255, 0, 0, 0, "arr" },
{ "maya_cube_6100_binary", 10572, 255, 0, 0, 0, "ufbxi_read_truncated_array(uc, &mesh->edge_smoothing, n..." },
{ "maya_cube_6100_binary", 10670, 255, 0, 0, 0, "Bad multivalue array type" },
{ "maya_cube_6100_binary", 6763, 23, 0, 0, 0, "vertices->size % 3 == 0" },
{ "maya_cube_6100_binary", 7448, 71, 0, 0, 0, "data->size % num_components == 0" },
{ "maya_cube_6100_binary", 8164, 255, 0, 0, 0, "ufbxi_read_vertex_element(uc, mesh, n, &layer->elem.dat..." },
{ "maya_cube_6100_binary", 9038, 255, 0, 0, 0, "ufbxi_read_vertex_element(uc, mesh, n, &layer->elem.dat..." },
{ "maya_cube_7100_ascii", 8925, 255, 0, 0, 0, "ufbxi_ascii_accept(uc, UFBXI_ASCII_INT)" },
{ "maya_cube_7100_ascii", 8929, 255, 0, 0, 0, "ufbxi_ascii_accept(uc, UFBXI_ASCII_NAME)" },
{ "maya_cube_7100_ascii", 8935, 255, 0, 0, 0, "ufbxi_ascii_accept(uc, '}')" },
{ "maya_cube_7100_binary", -1, 0, 0, 132, 0, "ufbxi_push_string_place_str(uc, type)" },
{ "maya_cube_7100_binary", -1, 0, 0, 133, 0, "ufbxi_merge_properties(uc, &stack->props, object->props..." },
{ "maya_cube_7100_binary", -1, 0, 0, 137, 0, "ufbxi_merge_properties(uc, &layer->layer_props, object-..." },
{ "maya_cube_7100_binary", -1, 0, 0, 225, 0, "ufbxi_push_string_place_str(uc, type)" },
{ "maya_cube_7100_binary", -1, 0, 0, 226, 0, "ufbxi_merge_properties(uc, &stack->props, object->props..." },
{ "maya_cube_7100_binary", -1, 0, 0, 233, 0, "ufbxi_merge_properties(uc, &layer->layer_props, object-..." },
{ "maya_cube_7100_binary", -1, 0, 1117, 0, 0, "stack" },
{ "maya_cube_7100_binary", -1, 0, 1117, 0, 0, "ufbxi_read_animation_stack(uc, node, &object)" },
{ "maya_cube_7100_binary", -1, 0, 1121, 0, 0, "layer" },
{ "maya_cube_7100_binary", -1, 0, 1121, 0, 0, "ufbxi_read_animation_layer(uc, node, &object)" },
{ "maya_cube_7100_binary", -1, 0, 658, 0, 0, "ufbxi_grow_array(&uc->ator_tmp, &uc->convert_buffer, &u..." },
{ "maya_cube_7100_binary", -1, 0, 807, 0, 0, "stack" },
{ "maya_cube_7100_binary", -1, 0, 807, 0, 0, "ufbxi_read_animation_stack(uc, node, &object)" },
{ "maya_cube_7100_binary", -1, 0, 811, 0, 0, "layer" },
{ "maya_cube_7100_binary", -1, 0, 811, 0, 0, "ufbxi_read_animation_layer(uc, node, &object)" },
{ "maya_cube_7100_binary", -1, 0, 965, 0, 0, "ufbxi_grow_array(&uc->ator_tmp, &uc->convert_buffer, &u..." },
{ "maya_game_sausage_6100_binary_deform", 44932, 98, 0, 0, 0, "data_end - data >= 2" },
{ "maya_game_sausage_6100_binary_wiggle", 53586, 79, 0, 0, 0, "data_end - data >= 1" },
{ "maya_game_sausage_6100_binary", 45318, 0, 0, 0, 0, "indices->size == weights->size" },
{ "maya_game_sausage_6100_binary", 45470, 0, 0, 0, 0, "transform->size >= 16" },
{ "maya_game_sausage_6100_binary", 45636, 0, 0, 0, 0, "transform_link->size >= 16" },
{ "maya_interpolation_modes_6100_binary", 16706, 0, 0, 0, 0, "ufbxi_get_val1(node, \"c\", (char**)&type_and_name)" },
{ "maya_interpolation_modes_6100_binary", 16930, 255, 0, 0, 0, "size <= UFBXI_MAX_ALLOCATION_SIZE" },
{ "maya_interpolation_modes_6100_binary", 16936, 0, 0, 0, 0, "data_end - data >= 2" },
{ "maya_interpolation_modes_6100_binary", 16936, 73, 0, 0, 0, "data_end - data >= 1" },
{ "maya_interpolation_modes_6100_binary", 16969, 255, 0, 0, 0, "Unknown slope mode" },
{ "maya_interpolation_modes_6100_binary", 16989, 255, 0, 0, 0, "Unknown weight mode" },
{ "maya_interpolation_modes_7500_ascii", -1, 0, 1168, 0, 0, "v" },
{ "maya_interpolation_modes_7500_ascii", -1, 0, 851, 0, 0, "v" },
{ "maya_interpolation_modes_7500_ascii", 13357, 43, 0, 0, 0, "times->size == values->size" },
{ "maya_interpolation_modes_7500_ascii", 14126, 43, 0, 0, 0, "attr_flags->size == refs->size" },
{ "maya_interpolation_modes_7500_ascii", 15551, 43, 0, 0, 0, "attrs->size == refs->size * 4u" },
{ "maya_interpolation_modes_7500_binary", -1, 0, 0, 143, 0, "keys" },
{ "maya_interpolation_modes_7500_binary", -1, 0, 0, 243, 0, "keys" },
{ "maya_interpolation_modes_7500_binary", -1, 0, 1177, 0, 0, "curve" },
{ "maya_interpolation_modes_7500_binary", -1, 0, 1234, 0, 0, "prop" },
{ "maya_interpolation_modes_7500_binary", -1, 0, 1234, 0, 0, "ufbxi_read_animation_curve_node(uc, node, &object)" },
{ "maya_interpolation_modes_7500_binary", -1, 0, 861, 0, 0, "curve" },
{ "maya_interpolation_modes_7500_binary", -1, 0, 897, 0, 0, "prop" },
{ "maya_interpolation_modes_7500_binary", -1, 0, 897, 0, 0, "ufbxi_read_animation_curve_node(uc, node, &object)" },
{ "maya_interpolation_modes_7500_binary", 24310, 255, 0, 0, 0, "times = ufbxi_find_array(node, ufbxi_KeyTime, 'l')" },
{ "maya_interpolation_modes_7500_binary", 24310, 255, 0, 0, 0, "ufbxi_read_animation_curve(uc, node, &object)" },
{ "maya_interpolation_modes_7500_binary", 24387, 255, 0, 0, 0, "values = ufbxi_find_array(node, ufbxi_KeyValueFloat, 'r..." },
{ "maya_interpolation_modes_7500_binary", 24418, 255, 0, 0, 0, "Bad multivalue array type" },
{ "maya_interpolation_modes_7500_binary", 24528, 255, 0, 0, 0, "attr_flags = ufbxi_find_array(node, ufbxi_KeyAttrFlags,..." },
{ "maya_interpolation_modes_7500_binary", 24627, 255, 0, 0, 0, "attrs = ufbxi_find_array(node, ufbxi_KeyAttrDataFloat, ..." },
{ "maya_interpolation_modes_7500_binary", 24724, 255, 0, 0, 0, "refs = ufbxi_find_array(node, ufbxi_KeyAttrRefCount, 'i..." },
{ "maya_interpolation_modes_7500_binary", 24765, 255, 0, 0, 0, "Bad multivalue array type" },
{ "maya_interpolation_modes_7500_binary", 25023, 0, 0, 0, 0, "refs_left >= 0" },
};
void ufbxt_do_file_test(const char *name, void (*test_fn)(ufbx_scene *s, ufbxt_diff_error *err), const char *suffix, ufbx_load_opts user_opts)
{
const uint32_t file_versions[] = { 6100, 7100, 7400, 7500, 7700 };
char buf[512];
snprintf(buf, sizeof(buf), "%s%s.obj", data_root, name);
size_t obj_size = 0;
void *obj_data = ufbxt_read_file(buf, &obj_size);
ufbxt_obj_file *obj_file = obj_data ? ufbxt_load_obj(obj_data, obj_size) : NULL;
free(obj_data);
char base_name[512];
ufbxt_begin_fuzz();
uint32_t num_opened = 0;
for (uint32_t vi = 0; vi < ufbxt_arraycount(file_versions); vi++) {
for (uint32_t fi = 0; fi < 2; fi++) {
uint32_t version = file_versions[vi];
const char *format = fi == 1 ? "ascii" : "binary";
if (suffix) {
snprintf(buf, sizeof(buf), "%s%s_%u_%s_%s.fbx", data_root, name, version, format, suffix);
snprintf(base_name, sizeof(base_name), "%s_%u_%s_%s", name, version, format, suffix);
} else {
snprintf(buf, sizeof(buf), "%s%s_%u_%s.fbx", data_root, name, version, format);
snprintf(base_name, sizeof(base_name), "%s_%u_%s", name, version, format);
}
if (g_file_version && version != g_file_version) continue;
if (g_file_type && strcmp(format, g_file_type)) continue;
size_t size = 0;
void *data = ufbxt_read_file(buf, &size);
if (!data) continue;
num_opened++;
ufbxt_logf("%s", buf);
ufbx_error error;
ufbx_load_opts load_opts = user_opts;
if (g_dedicated_allocs) {
load_opts.temp_huge_size = 1;
load_opts.result_huge_size = 1;
}
uint64_t load_begin = cputime_cpu_tick();
ufbx_scene *scene = ufbx_load_memory(data, size, &load_opts, &error);
uint64_t load_end = cputime_cpu_tick();
if (!scene) {
ufbxt_log_error(&error);
ufbxt_assert_fail(__FILE__, __LINE__, "Failed to parse file");
}
ufbx_load_opts stream_opts = load_opts;
stream_opts.read_buffer_size = 1;
ufbx_scene *streamed_scene = ufbx_load_file(buf, &stream_opts, &error);
if (streamed_scene) {
ufbxt_check_scene(scene);
} else {
ufbxt_log_error(&error);
ufbxt_assert_fail(__FILE__, __LINE__, "Failed to parse streamed file");
}
ufbx_free_scene(streamed_scene);
// Ignore geometry, animations, and both
{
ufbx_load_opts opts = load_opts;
opts.ignore_geometry = true;
ufbx_scene *ignore_scene = ufbx_load_memory(data, size, &opts, NULL);
ufbxt_check_scene(scene);
ufbx_free_scene(ignore_scene);
}
{
ufbx_load_opts opts = load_opts;
opts.ignore_animation = true;
ufbx_scene *ignore_scene = ufbx_load_memory(data, size, &opts, NULL);
ufbxt_check_scene(scene);
ufbx_free_scene(ignore_scene);
}
{
ufbx_load_opts opts = load_opts;
opts.ignore_geometry = true;
opts.ignore_animation = true;
ufbx_scene *ignore_scene = ufbx_load_memory(data, size, &opts, NULL);
ufbxt_check_scene(scene);
ufbx_free_scene(ignore_scene);
}
ufbxt_logf(".. Loaded in %.2fms: File %.1fkB, temp %.1fkB (%zu allocs), result %.1fkB (%zu allocs)",
cputime_cpu_delta_to_sec(NULL, load_end - load_begin) * 1e3,
(double)size * 1e-3,
(double)scene->metadata.temp_memory_used * 1e-3,
scene->metadata.temp_allocs,
(double)scene->metadata.result_memory_used * 1e-3,
scene->metadata.result_allocs
);
ufbxt_assert(scene->metadata.ascii == ((fi == 1) ? 1 : 0));
ufbxt_assert(scene->metadata.version == version);
ufbxt_check_scene(scene);
ufbxt_diff_error err = { 0 };
if (obj_file) {
ufbxt_diff_to_obj(scene, obj_file, &err, false);
}
test_fn(scene, &err);
if (err.num > 0) {
ufbx_real avg = err.sum / (ufbx_real)err.num;
ufbxt_logf(".. Absolute diff: avg %.3g, max %.3g (%zu tests)", avg, err.max, err.num);
}
size_t temp_allocs = scene->metadata.temp_allocs;
size_t result_allocs = scene->metadata.result_allocs;
ufbx_free_scene(scene);
if (g_fuzz) {
// fail_step records the first failing fuzz step; it is written racily by the
// worker threads, but any nonzero value is enough to flag a failure below.
size_t fail_step = 0;
int i;
g_fuzz_test_name = base_name;
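// Each fuzz case is tagged with a unique step number for reproducibility:
// 10000000+i limits temp allocations to i, 20000000+i limits result
// allocations to i, 30000000+i truncates the file to i bytes, and i*1000+v
// patches the byte at offset i (v selects the patched value variant).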
#pragma omp parallel for schedule(static, 16)
for (i = 0; i < (int)temp_allocs; i++) {
if (omp_get_thread_num() == 0) {
if (i % 16 == 0) {
fprintf(stderr, "\rFuzzing temp limit %s: %d/%d", base_name, i, (int)temp_allocs);
fflush(stderr);
}
}
size_t step = 10000000 + (size_t)i;
if (!ufbxt_test_fuzz(data, size, step, -1, (size_t)i, 0, 0)) fail_step = step;
}
fprintf(stderr, "\rFuzzing temp limit %s: %d/%d\n", base_name, (int)temp_allocs, (int)temp_allocs);
#pragma omp parallel for schedule(static, 16)
for (i = 0; i < (int)result_allocs; i++) {
if (omp_get_thread_num() == 0) {
if (i % 16 == 0) {
fprintf(stderr, "\rFuzzing result limit %s: %d/%d", base_name, i, (int)result_allocs);
fflush(stderr);
}
}
size_t step = 20000000 + (size_t)i;
if (!ufbxt_test_fuzz(data, size, step, -1, 0, (size_t)i, 0)) fail_step = step;
}
fprintf(stderr, "\rFuzzing result limit %s: %d/%d\n", base_name, (int)result_allocs, (int)result_allocs);
#pragma omp parallel for schedule(static, 16)
for (i = 1; i < (int)size; i++) {
if (omp_get_thread_num() == 0) {
if (i % 16 == 0) {
fprintf(stderr, "\rFuzzing truncate %s: %d/%d", base_name, i, (int)size);
fflush(stderr);
}
}
size_t step = 30000000 + (size_t)i;
if (!ufbxt_test_fuzz(data, size, step, -1, 0, 0, (size_t)i)) fail_step = step;
}
fprintf(stderr, "\rFuzzing truncate %s: %d/%d\n", base_name, (int)size, (int)size);
if (!g_no_patch) {
uint8_t *data_copy[256] = { 0 };
int patch_start = g_patch_start - omp_get_num_threads() * 16;
if (patch_start < 0) {
patch_start = 0;
}
#pragma omp parallel for schedule(static, 16)
for (i = patch_start; i < (int)size; i++) {
if (omp_get_thread_num() == 0) {
if (i % 16 == 0) {
fprintf(stderr, "\rFuzzing patch %s: %d/%d", base_name, i, (int)size);
fflush(stderr);
}
}
uint8_t **p_data_copy = &data_copy[omp_get_thread_num()];
if (*p_data_copy == NULL) {
*p_data_copy = malloc(size);
memcpy(*p_data_copy, data, size);
}
uint8_t *data_u8 = *p_data_copy;
size_t step = i * 1000;
uint8_t original = data_u8[i];
if (g_all_byte_values) {
for (uint32_t v = 0; v < 256; v++) {
data_u8[i] = (uint8_t)v;
if (!ufbxt_test_fuzz(data_u8, size, step + v, i, 0, 0, 0)) fail_step = step + v;
}
} else {
data_u8[i] = original + 1;
if (!ufbxt_test_fuzz(data_u8, size, step + 1, i, 0, 0, 0)) fail_step = step + 1;
data_u8[i] = original - 1;
if (!ufbxt_test_fuzz(data_u8, size, step + 2, i, 0, 0, 0)) fail_step = step + 2;
if (original != 0) {
data_u8[i] = 0;
if (!ufbxt_test_fuzz(data_u8, size, step + 3, i, 0, 0, 0)) fail_step = step + 3;
}
if (original != 0xff) {
data_u8[i] = 0xff;
if (!ufbxt_test_fuzz(data_u8, size, step + 4, i, 0, 0, 0)) fail_step = step + 4;
}
}
data_u8[i] = original;
}
fprintf(stderr, "\rFuzzing patch %s: %d/%d\n", base_name, (int)size, (int)size);
for (size_t i = 0; i < ufbxt_arraycount(data_copy); i++) {
free(data_copy[i]);
}
}
ufbxt_hintf("Fuzz failed on step: %zu", step);
ufbxt_assert(fail_step == 0);
} else {
uint8_t *data_u8 = (uint8_t*)data;
// Run a couple of known fuzz checks
for (size_t i = 0; i < ufbxt_arraycount(g_fuzz_checks); i++) {
const ufbxt_fuzz_check *check = &g_fuzz_checks[i];
if (strcmp(check->name, base_name)) continue;
uint8_t original;
if (check->patch_offset >= 0) {
// Bounds-check before touching the byte, then record and patch it.
ufbxt_assert((size_t)check->patch_offset < size);
original = data_u8[check->patch_offset];
ufbxt_logf(".. Patch byte %u from 0x%02x to 0x%02x: %s", check->patch_offset, original, check->patch_value, check->description);
data_u8[check->patch_offset] = check->patch_value;
}
ufbx_load_opts opts = { 0 };
if (check->temp_limit > 0) {
ufbxt_logf(".. Temp limit %u: %s", check->temp_limit, check->description);
opts.max_temp_allocs = check->temp_limit;
}
if (check->result_limit > 0) {
ufbxt_logf(".. Result limit %u: %s", check->result_limit, check->description);
opts.max_result_allocs = check->result_limit;
}
size_t truncated_size = size;
if (check->truncate_length > 0) {
ufbxt_logf(".. Truncated length %u: %s", check->truncate_length, check->description);
truncated_size = check->truncate_length;
}
ufbx_error error;
ufbx_scene *scene = ufbx_load_memory(data, truncated_size, &opts, &error);
if (scene) {
ufbxt_check_scene(scene);
ufbx_free_scene(scene);
}
if (check->patch_offset >= 0) {
data_u8[check->patch_offset] = original;
}
}
}
free(data);
}
}
if (num_opened == 0) {
ufbxt_assert_fail(__FILE__, __LINE__, "File not found");
}
free(obj_file);
}
#define UFBXT_IMPL 1
#define UFBXT_TEST(name) void ufbxt_test_fn_##name(void)
#define UFBXT_FILE_TEST(name) void ufbxt_test_fn_imp_file_##name(ufbx_scene *scene, ufbxt_diff_error *err); \
void ufbxt_test_fn_file_##name(void) { \
ufbx_load_opts user_opts = { 0 }; \
ufbxt_do_file_test(#name, &ufbxt_test_fn_imp_file_##name, NULL, user_opts); } \
void ufbxt_test_fn_imp_file_##name(ufbx_scene *scene, ufbxt_diff_error *err)
#define UFBXT_FILE_TEST_OPTS(name, get_opts) void ufbxt_test_fn_imp_file_##name(ufbx_scene *scene, ufbxt_diff_error *err); \
void ufbxt_test_fn_file_##name(void) { \
ufbxt_do_file_test(#name, &ufbxt_test_fn_imp_file_##name, NULL, get_opts); } \
void ufbxt_test_fn_imp_file_##name(ufbx_scene *scene, ufbxt_diff_error *err)
#define UFBXT_FILE_TEST_SUFFIX(name, suffix) void ufbxt_test_fn_imp_file_##name##_##suffix(ufbx_scene *scene, ufbxt_diff_error *err); \
void ufbxt_test_fn_file_##name##_##suffix(void) { \
ufbx_load_opts user_opts = { 0 }; \
ufbxt_do_file_test(#name, &ufbxt_test_fn_imp_file_##name##_##suffix, #suffix, user_opts); } \
void ufbxt_test_fn_imp_file_##name##_##suffix(ufbx_scene *scene, ufbxt_diff_error *err)
#define UFBXT_FILE_TEST_SUFFIX_OPTS(name, suffix, get_opts) void ufbxt_test_fn_imp_file_##name##_##suffix(ufbx_scene *scene, ufbxt_diff_error *err); \
void ufbxt_test_fn_file_##name##_##suffix(void) { \
ufbxt_do_file_test(#name, &ufbxt_test_fn_imp_file_##name##_##suffix, #suffix, get_opts); } \
void ufbxt_test_fn_imp_file_##name##_##suffix(ufbx_scene *scene, ufbxt_diff_error *err)
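// Usage sketch (hypothetical test): in all_tests.h a declaration such as
// UFBXT_FILE_TEST(maya_cube)
// {
// ufbxt_assert(scene != NULL);
// }
// expands on this first pass into the registration wrapper plus the test body,
// and on the second pass below into a { "maya_cube", ... } entry in g_tests.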
#include "all_tests.h"
#undef UFBXT_IMPL
#undef UFBXT_TEST
#undef UFBXT_FILE_TEST
#undef UFBXT_FILE_TEST_OPTS
#undef UFBXT_FILE_TEST_SUFFIX
#undef UFBXT_FILE_TEST_SUFFIX_OPTS
#define UFBXT_IMPL 0
#define UFBXT_TEST(name) { #name, &ufbxt_test_fn_##name },
#define UFBXT_FILE_TEST(name) { #name, &ufbxt_test_fn_file_##name },
#define UFBXT_FILE_TEST_OPTS(name, get_opts) { #name, &ufbxt_test_fn_file_##name },
#define UFBXT_FILE_TEST_SUFFIX(name, suffix) { #name "_" #suffix, &ufbxt_test_fn_file_##name##_##suffix },
#define UFBXT_FILE_TEST_SUFFIX_OPTS(name, suffix, get_opts) { #name "_" #suffix, &ufbxt_test_fn_file_##name##_##suffix },
ufbxt_test g_tests[] = {
#include "all_tests.h"
};
int ufbxt_run_test(ufbxt_test *test)
{
printf("%s: ", test->name);
fflush(stdout);
g_error.stack_size = 0;
g_hint[0] = '\0';
g_current_test = test;
if (!setjmp(g_test_jmp)) {
g_skip_print_ok = false;
test->func();
if (!g_skip_print_ok) {
printf("OK\n");
fflush(stdout);
}
return 1;
} else {
if (g_hint[0]) {
ufbxt_logf("Hint: %s", g_hint);
}
if (g_error.stack_size) {
ufbxt_log_error(&g_error);
}
return 0;
}
}
int main(int argc, char **argv)
{
uint32_t num_tests = ufbxt_arraycount(g_tests);
uint32_t num_ok = 0;
const char *test_filter = NULL;
cputime_init();
for (int i = 1; i < argc; i++) {
if (!strcmp(argv[i], "-v")) {
g_verbose = 1;
}
if (!strcmp(argv[i], "-t")) {
if (++i < argc) {
test_filter = argv[i];
}
}
if (!strcmp(argv[i], "-d")) {
if (++i < argc) {
size_t len = strlen(argv[i]);
if (len == 0 || len + 2 > sizeof(data_root)) {
fprintf(stderr, "-d: Data root empty or too long\n");
return 1;
}
memcpy(data_root, argv[i], len);
char end = argv[i][len - 1];
if (end != '/' && end != '\\') {
data_root[len] = '/';
data_root[len + 1] = '\0';
}
}
}
if (!strcmp(argv[i], "-f")) {
if (++i < argc) g_file_version = (uint32_t)atoi(argv[i]);
if (++i < argc) g_file_type = argv[i];
}
if (!strcmp(argv[i], "--fuzz")) {
g_fuzz = true;
}
if (!strcmp(argv[i], "--patch-all-byte-values")) {
g_all_byte_values = true;
}
if (!strcmp(argv[i], "--patch-start")) {
if (++i < argc) g_patch_start = atoi(argv[i]);
}
if (!strcmp(argv[i], "--dedicated-allocs")) {
g_dedicated_allocs = true;
}
if (!strcmp(argv[i], "--no-patch")) {
g_no_patch = true;
}
if (!strcmp(argv[i], "--threads")) {
#if _OPENMP
if (++i < argc) omp_set_num_threads(atoi(argv[i]));
#endif
}
if (!strcmp(argv[i], "--fuzz-step")) {
if (++i < argc) g_fuzz_step = (size_t)atoi(argv[i]);
}
}
#ifdef _OPENMP
// omp_get_num_threads() returns 1 outside a parallel region; use the
// maximum thread count to honor the 256-entry data_copy limit above.
if (omp_get_max_threads() > 256) {
omp_set_num_threads(256);
}
#else
if (g_fuzz) {
fprintf(stderr, "Fuzzing without threads, compile with OpenMP for better performance!\n");
}
#endif
uint32_t num_ran = 0;
for (uint32_t i = 0; i < num_tests; i++) {
ufbxt_test *test = &g_tests[i];
if (test_filter && strcmp(test->name, test_filter)) {
continue;
}
num_ran++;
if (ufbxt_run_test(test)) {
num_ok++;
}
ufbxt_log_flush();
}
if (num_ok < num_tests) {
printf("\n");
for (uint32_t i = 0; i < num_tests; i++) {
ufbxt_test *test = &g_tests[i];
if (test->fail.failed) {
ufbxt_fail *fail = &test->fail;
const char *file = fail->file, *find;
find = strrchr(file, '/');
file = find ? find + 1 : file;
find = strrchr(file, '\\');
file = find ? find + 1 : file;
printf("(%s) %s:%u: %s\n", test->name,
file, fail->line, fail->expr);
}
}
}
printf("\nTests passed: %u/%u\n", num_ok, num_ran);
if (g_fuzz) {
printf("Fuzz checks:\n\nstatic const ufbxt_fuzz_check g_fuzz_checks[] = {\n");
for (size_t i = 0; i < ufbxt_arraycount(g_checks); i++) {
ufbxt_check_line *check = &g_checks[i];
if (check->patch_offset == 0) continue;
char safe_desc[60];
size_t safe_desc_len = 0;
for (const char *c = check->description; *c; c++) {
if (sizeof(safe_desc) - safe_desc_len < 6) {
safe_desc[safe_desc_len++] = '.';
safe_desc[safe_desc_len++] = '.';
safe_desc[safe_desc_len++] = '.';
break;
}
if (*c == '"' || *c == '\\') {
safe_desc[safe_desc_len++] = '\\';
}
safe_desc[safe_desc_len++] = *c;
}
safe_desc[safe_desc_len] = '\0';
int32_t patch_offset = check->patch_offset != UINT32_MAX ? (int32_t)(check->patch_offset - 1) : -1;
printf("\t{ \"%s\", %d, %u, %u, %u, %u, \"%s\" },\n", check->test_name,
patch_offset, (uint32_t)check->patch_value, (uint32_t)check->temp_limit, (uint32_t)check->result_limit, (uint32_t)check->truncate_length, safe_desc);
free(check->test_name);
}
printf("};\n");
}
return num_ok == num_ran ? 0 : 1;
}
|
decrypt_md5_parallel.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <sys/time.h> // gettimeofday(), struct timeval
#include "md5.h"
// *******************************
// Timing helpers
// *******************************
struct timeval start, stop;
unsigned diffsec, diffusec;
void compute_time_and_flush();
void time_log_start() {
gettimeofday(&start, NULL);
}
void time_log_stop() {
gettimeofday(&stop, NULL);
compute_time_and_flush();
}
void compute_time_and_flush() {
diffsec = stop.tv_sec - start.tv_sec;
if (stop.tv_usec >= start.tv_usec) {
diffusec = stop.tv_usec - start.tv_usec;
} else { // borrow one second for the microsecond difference
diffsec -= 1;
diffusec = 1000000 + stop.tv_usec - start.tv_usec;
}
printf("%u.%06u seconds\n", diffsec, diffusec);
}
// *******************************
// MD5
// *******************************
// MD5 functions
#define F(x,y,z) ((x & y) | (~x & z))
#define G(x,y,z) ((x & z) | (y & ~z))
#define H(x,y,z) (x ^ y ^ z)
#define I(x,y,z) (y ^ (x | ~z))
// Rotate a 32 bit number left by n bits
#define ROTATE(x,n) ((x << n) | (x >> (32-n)))
// Swap endianess of 32 bit number
#define SWAP(x) ( ((x >> 24) & 0x000000ff) | ((x << 8) & 0x00ff0000) \
| ((x >> 8) & 0x0000ff00) | ((x << 24) & 0xff000000) );
// CONTANTS -------------------------------------------------------------------
// Magic numbers
static const uint32_t A = 0x67452301,
B = 0xEFCDAB89,
C = 0x98BADCFE,
D = 0x10325476;
static const uint32_t K[64] =
{ 0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee,
0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501,
0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be,
0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821,
0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa,
0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8,
0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed,
0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a,
0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c,
0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70,
0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05,
0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665,
0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039,
0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1,
0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1,
0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391 };
// Byte selectors
static const uint32_t M[64] =
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
1, 6, 11, 0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12,
5, 8, 11, 14, 1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15, 2,
0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9 };
// Shift values
static const uint32_t S[64] =
{ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21 };
// FUNCTIONS ------------------------------------------------------------------
void md5_init (md5_t* self) {
self->bufsize = 0;
self->bitsize = 0;
self->rawhash[0] = A;
self->rawhash[1] = B;
self->rawhash[2] = C;
self->rawhash[3] = D;
}
void md5_digest (md5_t* self) {
uint32_t data[16], a, b, c, d;
a = self->rawhash[0];
b = self->rawhash[1];
c = self->rawhash[2];
d = self->rawhash[3];
for (int i = 0, j = 0; i < 16; ++i, j+=4)
data[i] = (self->buffer[j] ) + (self->buffer[j + 1] << 8) +
(self->buffer[j + 2] << 16) + (self->buffer[j + 3] << 24);
for (int i = 0; i < 64; ++i) {
uint32_t func, temp;
if (i < 16) func = F(b,c,d); // round 1
else if (i < 32) func = G(b,c,d); // round 2
else if (i < 48) func = H(b,c,d); // round 3
else func = I(b,c,d); // round 4 (plain else: func is always initialized)
temp = d;
d = c;
c = b;
b = b + ROTATE(a + func + data[M[i]] + K[i], S[i]);
a = temp;
}
self->rawhash[0] += a;
self->rawhash[1] += b;
self->rawhash[2] += c;
self->rawhash[3] += d;
}
void md5_update (md5_t* self, byte_t* data, size_t length) {
for (size_t i = 0; i < length; ++i) {
self->buffer[self->bufsize] = data[i];
self->bufsize++;
if (self->bufsize == 64) {
md5_digest(self);
self->bitsize += 512;
self->bufsize = 0;
}
}
}
void md5_hash (md5_t* self, uint32_t hash[4]) {
size_t i = self->bufsize;
self->buffer[i++] = 0x80; // append single bit to message
while (i < 64) self->buffer[i++] = 0x00; // pad with zeros
if (self->bufsize > 55) { // no room left for the 8-byte length: flush an extra block
md5_digest(self);
for (i = 0; i < 64; ++i) self->buffer[i] = 0x00;
}
self->bitsize += self->bufsize * 8;
self->buffer[56] = self->bitsize;
self->buffer[57] = self->bitsize >> 8;
self->buffer[58] = self->bitsize >> 16;
self->buffer[59] = self->bitsize >> 24;
self->buffer[60] = self->bitsize >> 32;
self->buffer[61] = self->bitsize >> 40;
self->buffer[62] = self->bitsize >> 48;
self->buffer[63] = self->bitsize >> 56;
md5_digest(self);
hash[0] = SWAP(self->rawhash[0]);
hash[1] = SWAP(self->rawhash[1]);
hash[2] = SWAP(self->rawhash[2]);
hash[3] = SWAP(self->rawhash[3]);
}
// ===================
// *******************************
// Comparison and brute-force helpers
// *******************************
#define NUMERO_DE_THREADS 8
#define TAMANHO_PALAVRA 5
#define TOTAL_CARACTERES 70
const byte_t ARRAY_CARACTERES[] = {
0x21, 0x23, 0x24, 0x25, 0x2b, 0x3d, 0x3f, 0x40, 0x79, 0x7a, // ! # $ % + = ? @ y z
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, // 0 1 2 3 4 5 6 7 8 9
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0X4a, // A B C D E F G H I J
0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0X54, // K L M N O P Q R S T
0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x61, 0x62, 0x63, 0X64, // U V W X Y Z a b c d
0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0X6e, // e f g h i j k l m n
0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0X78 // o p q r s t u v w x
};
int comparar_hashes (const uint32_t a[], const uint32_t b[]) {
return a[0] == b[0] && a[1] == b[1] && a[2] == b[2] && a[3] == b[3];
}
int forcar_quebra_hash_MD5 (const uint32_t hashOriginal[4], uint32_t hashGerada[4], md5_t MD5, byte_t* resultado, byte_t* stringTeste, int lengthPalavraAtual) {
if (lengthPalavraAtual < TAMANHO_PALAVRA - 1) {
for (int i = 0; i < TOTAL_CARACTERES; ++i) {
stringTeste[lengthPalavraAtual] = ARRAY_CARACTERES[i];
if (forcar_quebra_hash_MD5(hashOriginal, hashGerada, MD5, resultado, stringTeste, lengthPalavraAtual + 1))
return 1;
}
} else {
for (int i = 0; i < TOTAL_CARACTERES; ++i) {
stringTeste[lengthPalavraAtual] = ARRAY_CARACTERES[i];
md5_init(&MD5);
md5_update(&MD5, stringTeste, TAMANHO_PALAVRA);
md5_hash(&MD5, hashGerada);
if (comparar_hashes(hashOriginal, hashGerada)) {
// stringTeste is not NUL-terminated; copy exactly TAMANHO_PALAVRA bytes.
memcpy(resultado, stringTeste, TAMANHO_PALAVRA);
resultado[TAMANHO_PALAVRA] = '\0';
return 1;
}
}
}
return 0;
}
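// Usage sketch (not part of the original program): exercising the md5_t API
// above on the standard RFC 1321 test vector; byte_t and md5_t come from md5.h.
static void md5_selftest() {
md5_t ctx;
uint32_t hash[4];
md5_init(&ctx);
md5_update(&ctx, (byte_t*)"abc", 3);
md5_hash(&ctx, hash);
// Expected output: 900150983cd24fb0d6963f7d28e17f72
printf("md5(\"abc\") = %08x%08x%08x%08x\n", hash[0], hash[1], hash[2], hash[3]);
}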
// *******************************
// Main
// *******************************
int main (){
byte_t str[ TAMANHO_PALAVRA + 1 ];
uint32_t hashOriginal[4];
uint32_t hashGerada[4];
md5_t MD5;
char hexstring[33] = {0};
char resultado[TAMANHO_PALAVRA + 1]; // +1 for the NUL terminator
int threadId;
// Clear the result buffer
memset(resultado, '\0', TAMANHO_PALAVRA + 1);
printf("Enter the hash to crack: ");
fgets(hexstring, 33, stdin);
for (int i = 0; i < 4; i++)
sscanf(&hexstring[i * 8], "%8x", &hashOriginal[i]);
printf("\n===================================\n");
printf("Número de threads: %d\n", NUMERO_DE_THREADS);
printf("Número de letras: %d\n", TAMANHO_PALAVRA);
printf("===================================\n");
// Iniciar contagem de tempo
time_log_start();
static int encontrado = 0;
omp_set_num_threads(NUMERO_DE_THREADS);
#pragma omp parallel private(threadId, str, hashGerada, MD5) shared(hashOriginal, encontrado)
{
threadId = omp_get_thread_num() + 1;
printf("Thread número %d rodando...\n", threadId);
#pragma omp for schedule(dynamic)
for (int i = 0; i < TOTAL_CARACTERES; ++i) { // '<': ARRAY_CARACTERES has exactly TOTAL_CARACTERES entries
if (encontrado == 0) {
str[0] = ARRAY_CARACTERES[i];
// Only set the flag on success, so a failing iteration cannot clear it.
if (forcar_quebra_hash_MD5(hashOriginal, hashGerada, MD5, (byte_t*)resultado, str, 1))
encontrado = 1;
}
}
}
printf("\n===================================\n");
printf("Texto original: \n%s\n", resultado);
printf("===================================\n");
// Print the elapsed time
printf("\n===================================\n");
printf("Execution time:\n");
time_log_stop();
printf("===================================\n");
return 0;
} |
sht_legendre.c | /*
* Copyright (c) 2010-2015 Centre National de la Recherche Scientifique.
* written by Nathanael Schaeffer (CNRS, ISTerre, Grenoble, France).
*
* nathanael.schaeffer@ujf-grenoble.fr
*
* This software is governed by the CeCILL license under French law and
* abiding by the rules of distribution of free software. You can use,
* modify and/or redistribute the software under the terms of the CeCILL
* license as circulated by CEA, CNRS and INRIA at the following URL
* "http://www.cecill.info".
*
* The fact that you are presently reading this means that you have had
* knowledge of the CeCILL license and that you accept its terms.
*
*/
/** \internal \file sht_legendre.c
\brief Compute legendre polynomials and associated functions.
- Normalization of the associated functions for various spherical harmonics conventions are possible, with or without Condon-Shortley phase.
- When computing the derivatives (with respect to colatitude theta), there are no singularities.
- written by Nathanael Schaeffer / CNRS, with some ideas and code from the GSL 1.13 and Numerical Recipes.
The associated legendre functions are computed using the following recurrence formula :
\f[ Y_l^m(x) = a_l^m \, x \ Y_{l-1}^m(x) + b_l^m \ Y_{l-2}^m(x) \f]
with starting values :
\f[ Y_m^m(x) = a_m^m \ (1-x^2)^{m/2} \f]
and
\f[ Y_{m+1}^m(x) = a_{m+1}^m \, x \ Y_m^m(x) \f]
where \f$ a_l^m \f$ and \f$ b_l^m \f$ are coefficients that depend on the normalization, and which are
precomputed once for all by \ref legendre_precomp.
*/
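/* Illustrative sketch (not part of SHTns): the recurrence above written out
 * naively for the orthonormal convention without Condon-Shortley phase.
 * Assumes <math.h>. It performs no rescaling, so it underflows for large m,
 * which is precisely what the scaled routines below are designed to avoid. */
static double plm_recurrence_demo(int l, int m, double x)
{
double ymm = sqrt(1.0/(4.0*M_PI)); /* Y_0^0 */
for (int k = 1; k <= m; ++k) /* prefactor of Y_m^m */
ymm *= sqrt((2.0*k + 1.0)/(2.0*k));
ymm *= pow((1.0 - x)*(1.0 + x), 0.5*m); /* (1-x^2)^(m/2) = sin(theta)^m */
if (l == m) return ymm;
double y1 = sqrt(2.0*m + 3.0) * x * ymm; /* Y_{m+1}^m */
for (int ll = m + 2; ll <= l; ++ll) {
double t = (double)(ll + m) * (ll - m);
double a = sqrt((2.0*ll + 1.0)*(2.0*ll - 1.0)/t);
double b = -sqrt((2.0*ll + 1.0)/(2.0*ll - 3.0) * ((ll - 1.0 + m)*(ll - 1.0 - m))/t);
double y = a*x*y1 + b*ymm; /* three-term recurrence in l */
ymm = y1; y1 = y;
}
return y1;
}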
// index in alm array for given im.
#define alm_im(shtns, im) (shtns->alm + (im)*(2*shtns->lmax - ((im)-1)*shtns->mres))
#if SHT_VERBOSE > 1
#define LEG_RANGE_CHECK
#endif
#ifndef HAVE_LONG_DOUBLE_WIDER
#define long_double_caps 0
typedef double real;
#define SQRT sqrt
#define COS cos
#define SIN sin
#define FABS fabs
#define legendre_sphPlm_hp legendre_sphPlm
#define legendre_sphPlm_array_hp legendre_sphPlm_array
#define legendre_sphPlm_deriv_array_hp legendre_sphPlm_deriv_array
#else
int long_double_caps = 0;
typedef long double real;
#define SQRT sqrtl
#define COS cosl
#define SIN sinl
#define FABS fabsl
// scale factor applied for LMAX larger than SHT_L_RESCALE. Allows accurate transforms up to l=2700 with 64 bit double precision.
#define SHT_LEG_SCALEF 1.1018032079253110206e-280
#define SHT_L_RESCALE 1536
// returns 1 for large exponent only => useless.
// returns 2 for extended precision only => ok to improve gauss points.
// returns 3 for extended precision and large exponent => ok to improve legendre recurrence.
static int test_long_double()
{
volatile real tt; // volatile avoids optimizations.
int p = 0;
tt = 1.0e-1000L; tt *= tt;
if (tt > 0) p |= 1; // bit 0 set for large exponent
tt = 1.0; tt += 1.e-18;
if (tt > 1.0) p |= 2; // bit 1 set for extended precision
long_double_caps = p;
return p;
}
/// \internal high precision version of \ref a_sint_pow_n
static real a_sint_pow_n_hp(real val, real cost, long int n)
{
real s2 = (1.-cost)*(1.+cost); // sin(t)^2 = 1 - cos(t)^2
long int k = n >> 7;
if (sizeof(s2) > 8) k = 0; // enough accuracy, we do not bother.
#ifdef LEG_RANGE_CHECK
if (s2 < 0) shtns_runerr("sin(t)^2 < 0 !!!");
#endif
if (n&1) val *= SQRT(s2); // = sin(t)
do {
if (n&2) val *= s2;
n >>= 1;
s2 *= s2;
} while(n > k);
n >>= 1;
while(n > 0) { // take care of very large power n
n--;
val *= s2;
}
return val; // = sint(t)^n
}
#endif
/// \internal computes val.sin(t)^n from cos(t). ie returns val.(1-x^2)^(n/2), with x = cos(t)
/// assumes: -1 <= cost <= 1, n>=0, and nval<=0 is the extended exponent associated to val.
/// updates nval, and returns val such as the result is val.SHT_SCALE_FACTOR^(nval)
static double a_sint_pow_n_ext(double val, double cost, int n, int *nval)
{
double s2 = (1.-cost)*(1.+cost); // sin(t)^2 = 1 - cos(t)^2 >= 0
double val0 = val; // store sign
int ns2 = 0;
int nv = *nval;
#ifdef LEG_RANGE_CHECK
if (s2 < 0) shtns_runerr("sin(t)^2 < 0 !!!");
#endif
val = fabs(val); // val >= 0
if (n&1) val *= sqrt(s2); // = sin(t)
while (n >>= 1) {
if (n&1) {
if (val < 1.0/SHT_SCALE_FACTOR) { // val >= 0
nv--; val *= SHT_SCALE_FACTOR;
}
val *= s2; nv += ns2; // 1/S^2 < val < 1
}
s2 *= s2; ns2 += ns2;
if (s2 < 1.0/SHT_SCALE_FACTOR) { // s2 >= 0
ns2--; s2 *= SHT_SCALE_FACTOR; // 1/S < s2 < 1
}
}
while ((nv < 0) && (val > 1.0/SHT_SCALE_FACTOR)) { // try to minimize |nv|
++nv; val *= 1.0/SHT_SCALE_FACTOR;
}
if (val0 < 0) val *= -1.0; // restore sign.
*nval = nv;
return val; // 1/S^2 < val < 1
}
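/* Usage sketch (hypothetical values): the pair (val, nval) is an extended-
 * exponent representation, i.e. the true value is val*SHT_SCALE_FACTOR^nval
 * with nval <= 0, so sin(theta)^n stays representable even for huge n:
 * int ny = 0;
 * double v = a_sint_pow_n_ext(1.0, cos(t), 5000, &ny);
 * // true value of sin(t)^5000 = v * pow(SHT_SCALE_FACTOR, ny)
 */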
/// \internal Returns the value of a legendre polynomial of degree l and order im*MRES, normalized for spherical harmonics, using recurrence.
/// Requires a previous call to \ref legendre_precomp().
/// Output compatible with the GSL function gsl_sf_legendre_sphPlm(l, m, x)
static double legendre_sphPlm(shtns_cfg shtns, const int l, const int im, const double x)
{
double *al;
int m, ny;
double ymm, ymmp1;
m = im*MRES;
#ifdef LEG_RANGE_CHECK
if ( (l>LMAX) || (l<m) || (im>MMAX) ) shtns_runerr("argument out of range in legendre_sphPlm");
#endif
ny = 0;
al = alm_im(shtns, im);
ymm = al[0];
if (m>0) ymm = a_sint_pow_n_ext(ymm, x, m, &ny); // ny <= 0
ymmp1 = ymm; // l=m
if (l == m) goto done;
ymmp1 = al[1] * (x*ymmp1); // l=m+1
if (l == m+1) goto done;
m+=2; al+=2;
while ((ny < 0) && (m < l)) { // take care of scaled values
ymm = al[1]*(x*ymmp1) + al[0]*ymm;
ymmp1 = al[3]*(x*ymm) + al[2]*ymmp1;
m+=2; al+=4;
if (fabs(ymm) > 1.0/SHT_SCALE_FACTOR) { // rescale when value is significant
++ny; ymm *= 1.0/SHT_SCALE_FACTOR; ymmp1 *= 1.0/SHT_SCALE_FACTOR;
}
}
while (m < l) { // values are unscaled.
ymm = al[1]*(x*ymmp1) + al[0]*ymm;
ymmp1 = al[3]*(x*ymm) + al[2]*ymmp1;
m+=2; al+=4;
}
if (m == l) {
ymmp1 = al[1]*(x*ymmp1) + al[0]*ymm;
}
done:
if (ny < 0) {
if (ny+3 < 0) return 0.0;
do { ymmp1 *= 1.0/SHT_SCALE_FACTOR; } while (++ny < 0); // return unscaled values.
}
return ymmp1;
}
#if HAVE_LONG_DOUBLE_WIDER
static double legendre_sphPlm_hp(shtns_cfg shtns, const int l, const int im, double x)
{
double *al;
int i,m;
real ymm, ymmp1;
if (long_double_caps < 3) return legendre_sphPlm(shtns, l, im, x); // extended precision not available: fall back to the double version.
m = im*MRES;
#ifdef LEG_RANGE_CHECK
if ( (l>LMAX) || (l<m) || (im>MMAX) ) shtns_runerr("argument out of range in legendre_sphPlm");
#endif
al = alm_im(shtns, im);
ymm = al[0]; // l=m
if (m>0) ymm *= SHT_LEG_SCALEF;
ymmp1 = ymm;
if (l==m) goto done;
ymmp1 = al[1] * (x*ymmp1); // l=m+1
al+=2;
if (l == m+1) goto done;
for (i=m+2; i<l; i+=2) {
ymm = al[1]*(x*ymmp1) + al[0]*ymm;
ymmp1 = al[3]*(x*ymm) + al[2]*ymmp1;
al+=4;
}
if (i==l) {
ymmp1 = al[1]*(x*ymmp1) + al[0]*ymm;
}
done:
if (m>0) ymmp1 *= a_sint_pow_n_hp(1.0/SHT_LEG_SCALEF, x, m);
return ((double) ymmp1);
}
#endif
/// \internal Compute values of legendre polynomials normalized for spherical harmonics,
/// for a range of l=m..lmax, at given m and x, using recurrence.
/// Requires a previous call to \ref legendre_precomp().
/// Output compatible with the GSL function gsl_sf_legendre_sphPlm_array(lmax, m, x, yl)
/// \param lmax maximum degree computed, \param im = m/MRES with m the SH order, \param x argument, x=cos(theta).
/// \param[out] yl is a double array of size (lmax-m+1) filled with the values.
static void legendre_sphPlm_array(shtns_cfg shtns, const int lmax, const int im, const double x, double *yl)
{
double *al;
int l, m, ny;
double ymm, ymmp1;
m = im*MRES;
#ifdef LEG_RANGE_CHECK
if ( (lmax>LMAX) || (lmax<m) || (im>MMAX) ) shtns_runerr("argument out of range in legendre_sphPlm");
#endif
al = alm_im(shtns, im);
yl -= m; // shift pointer
for (l=m; l<=lmax; ++l) yl[l] = 0.0; // zero out array.
ny = 0;
ymm = al[0];
if (m>0) ymm = a_sint_pow_n_ext(ymm, x, m, &ny); // l=m, ny <= 0
if (ny==0) yl[m] = ymm;
if (lmax==m) return;
ymmp1 = ymm * al[1] * x; // l=m+1
if (ny==0) yl[m+1] = ymmp1;
if (lmax==m+1) return;
l=m+2; al+=2;
while ((ny < 0) && (l < lmax)) { // values are negligible => discard.
ymm = al[1]*(x*ymmp1) + al[0]*ymm;
ymmp1 = al[3]*(x*ymm) + al[2]*ymmp1;
l+=2; al+=4;
if (fabs(ymm) > 1.0/SHT_SCALE_FACTOR) { // rescale when value is significant
++ny; ymm *= 1.0/SHT_SCALE_FACTOR; ymmp1 *= 1.0/SHT_SCALE_FACTOR;
}
}
while (l < lmax) { // values are unscaled => store
ymm = al[1]*(x*ymmp1) + al[0]*ymm;
ymmp1 = al[3]*(x*ymm) + al[2]*ymmp1;
yl[l] = ymm; yl[l+1] = ymmp1;
l+=2; al+=4;
}
if ((l == lmax) && (ny == 0)) {
yl[l] = al[1]*(x*ymmp1) + al[0]*ymm;
}
}
#if HAVE_LONG_DOUBLE_WIDER
/// \internal high precision version of \ref legendre_sphPlm_array
static void legendre_sphPlm_array_hp(shtns_cfg shtns, const int lmax, const int im, const double cost, double *yl)
{
double *al;
long int l,m;
int rescale = 0; // flag for rescale.
real ymm, ymmp1, x;
if (long_double_caps < 3) { // not worth it.
legendre_sphPlm_array(shtns, lmax, im, cost, yl); return;
}
m = im*MRES;
#ifdef LEG_RANGE_CHECK
if ((lmax > LMAX)||(lmax < m)||(im>MMAX)) shtns_runerr("argument out of range in legendre_sphPlm_array");
#endif
x = cost;
al = alm_im(shtns, im);
yl -= m; // shift pointer
ymm = al[0]; // l=m
if (m>0) {
if ((lmax <= SHT_L_RESCALE) || (sizeof(ymm) > 8)) {
ymm = a_sint_pow_n_hp(ymm, x, m);
} else {
rescale = 1;
ymm *= SHT_LEG_SCALEF;
}
}
yl[m] = ymm;
if (lmax==m) goto done; // done.
ymmp1 = ymm * al[1] * x; // l=m+1
yl[m+1] = ymmp1;
al+=2;
if (lmax==m+1) goto done; // done.
for (l=m+2; l<lmax; l+=2) {
ymm = al[1]*(x*ymmp1) + al[0]*ymm;
ymmp1 = al[3]*(x*ymm) + al[2]*ymmp1;
yl[l] = ymm;
yl[l+1] = ymmp1;
al+=4;
}
if (l==lmax) {
yl[l] = al[1]*(x*ymmp1) + al[0]*ymm;
}
done:
if (rescale != 0) {
ymm = a_sint_pow_n_hp(1.0/SHT_LEG_SCALEF, x, m);
for (l=m; l<=lmax; ++l) { // rescale.
yl[l] *= ymm;
}
}
return;
}
#endif
/// \internal Compute values of a legendre polynomial normalized for spherical harmonics derivatives, for a range of l=m..lmax, using recurrence.
/// Requires a previous call to \ref legendre_precomp(). Output is not directly compatible with GSL :
/// - if m=0 : returns ylm(x) and d(ylm)/d(theta) = -sin(theta)*d(ylm)/dx
/// - if m>0 : returns ylm(x)/sin(theta) and d(ylm)/d(theta).
/// This way, there are no singularities, everything is well defined for x=[-1,1], for any m.
/// \param lmax maximum degree computed, \param im = m/MRES with m the SH order, \param x argument, x=cos(theta).
/// \param sint = sqrt(1-x^2) to avoid recomputation of sqrt.
/// \param[out] yl is a double array of size (lmax-m+1) filled with the values (divided by sin(theta) if m>0)
/// \param[out] dyl is a double array of size (lmax-m+1) filled with the theta-derivatives.
static void legendre_sphPlm_deriv_array(shtns_cfg shtns, const int lmax, const int im, const double x, const double sint, double *yl, double *dyl)
{
double *al;
int l,m, ny;
double st, y0, y1, dy0, dy1;
m = im*MRES;
#ifdef LEG_RANGE_CHECK
if ((lmax > LMAX)||(lmax < m)||(im>MMAX)) shtns_runerr("argument out of range in legendre_sphPlm_deriv_array");
#endif
al = alm_im(shtns, im);
yl -= m; dyl -= m; // shift pointers
for (l=m; l<=lmax; ++l) {
yl[l] = 0.0; dyl[l] = 0.0; // zero out arrays.
}
ny = 0;
st = sint;
y0 = al[0];
dy0 = 0.0;
if (m>0) {
y0 = a_sint_pow_n_ext(y0, x, m-1, &ny);
dy0 = x*m*y0;
st *= st; // st = sin(theta)^2 is used in the recurrence for m>0
}
if (ny==0) {
yl[m] = y0; dyl[m] = dy0; // l=m
}
if (lmax==m) return; // done.
y1 = al[1] * (x * y0);
dy1 = al[1]*( x*dy0 - st*y0 );
if (ny == 0) {
yl[m+1] = y1; dyl[m+1] = dy1; // l=m+1
}
if (lmax==m+1) return; // done.
l=m+2; al+=2;
while ((ny < 0) && (l < lmax)) { // values are negligible => discard.
y0 = al[1]*(x*y1) + al[0]*y0;
dy0 = al[1]*(x*dy1 - y1*st) + al[0]*dy0;
y1 = al[3]*(x*y0) + al[2]*y1;
dy1 = al[3]*(x*dy0 - y0*st) + al[2]*dy1;
l+=2; al+=4;
if (fabs(y0) > 1.0/SHT_SCALE_FACTOR) { // rescale when value is significant
++ny; y0 *= 1.0/SHT_SCALE_FACTOR; dy0 *= 1.0/SHT_SCALE_FACTOR;
y1 *= 1.0/SHT_SCALE_FACTOR; dy1 *= 1.0/SHT_SCALE_FACTOR;
}
}
while (l < lmax) { // values are unscaled => store.
y0 = al[1]*(x*y1) + al[0]*y0;
dy0 = al[1]*(x*dy1 - y1*st) + al[0]*dy0;
y1 = al[3]*(x*y0) + al[2]*y1;
dy1 = al[3]*(x*dy0 - y0*st) + al[2]*dy1;
yl[l] = y0; dyl[l] = dy0;
yl[l+1] = y1; dyl[l+1] = dy1;
l+=2; al+=4;
}
if ((l==lmax) && (ny == 0)) {
yl[l] = al[1]*(x*y1) + al[0]*y0;
dyl[l] = al[1]*(x*dy1 - y1*st) + al[0]*dy0;
}
}
#if HAVE_LONG_DOUBLE_WIDER
/// \internal high precision version of \ref legendre_sphPlm_deriv_array
static void legendre_sphPlm_deriv_array_hp(shtns_cfg shtns, const int lmax, const int im, const double cost, const double sint, double *yl, double *dyl)
{
double *al;
long int l,m;
int rescale = 0; // flag for rescale.
real x, st, y0, y1, dy0, dy1;
if (long_double_caps < 3) { // not worth it.
legendre_sphPlm_deriv_array(shtns, lmax, im, cost, sint, yl, dyl); return;
}
x = cost;
m = im*MRES;
#ifdef LEG_RANGE_CHECK
if ((lmax > LMAX)||(lmax < m)||(im>MMAX)) shtns_runerr("argument out of range in legendre_sphPlm_deriv_array");
#endif
al = alm_im(shtns, im);
yl -= m; dyl -= m; // shift pointers
st = sint;
y0 = al[0];
dy0 = 0.0;
if (m>0) { // m > 0
l = m-1;
if ((lmax <= SHT_L_RESCALE) || (sizeof(y0) > 8)) {
if (l&1) {
y0 = a_sint_pow_n_hp(y0, x, l-1) * sint; // avoid computation of sqrt
} else y0 = a_sint_pow_n_hp(y0, x, l);
} else {
rescale = 1;
y0 *= SHT_LEG_SCALEF;
}
dy0 = x*m*y0;
st *= st; // st = sin(theta)^2 is used in the recurrence for m>0
}
yl[m] = y0; // l=m
dyl[m] = dy0;
if (lmax==m) goto done; // done.
y1 = al[1] * (x * y0);
dy1 = al[1]*( x*dy0 - st*y0 );
yl[m+1] = y1; // l=m+1
dyl[m+1] = dy1;
if (lmax==m+1) goto done; // done.
al+=2;
for (l=m+2; l<lmax; l+=2) {
y0 = al[1]*(x*y1) + al[0]*y0;
dy0 = al[1]*(x*dy1 - y1*st) + al[0]*dy0;
yl[l] = y0; dyl[l] = dy0;
y1 = al[3]*(x*y0) + al[2]*y1;
dy1 = al[3]*(x*dy0 - y0*st) + al[2]*dy1;
yl[l+1] = y1; dyl[l+1] = dy1;
al+=4;
}
if (l==lmax) {
yl[l] = al[1]*(x*y1) + al[0]*y0;
dyl[l] = al[1]*(x*dy1 - y1*st) + al[0]*dy0;
}
done:
if (rescale != 0) {
l = m-1; // compute sin(theta)^(m-1)
if (l&1) {
y0 = a_sint_pow_n_hp(1.0/SHT_LEG_SCALEF, x, l-1) * sint; // avoid computation of sqrt
} else y0 = a_sint_pow_n_hp(1.0/SHT_LEG_SCALEF, x, l);
for (l=m; l<=lmax; ++l) {
yl[l] *= y0; dyl[l] *= y0; // rescale
}
}
return;
}
#endif
/// \internal Same as \ref legendre_sphPlm_deriv_array for x=0 and sint=1 (equator).
static void legendre_sphPlm_deriv_array_equ(shtns_cfg shtns, const int lmax, const int im, double *yl, double *dyl)
{
double *al;
int l,m;
double y0, dy1;
m = im*MRES;
#ifdef LEG_RANGE_CHECK
if ((lmax > LMAX)||(lmax < m)||(im>MMAX)) shtns_runerr("argument out of range in legendre_sphPlm_deriv_array");
#endif
al = alm_im(shtns, im);
yl -= m; dyl -= m; // shift pointers
y0 = al[0];
yl[m] = y0; dyl[m] = 0.0; // l=m
if (lmax==m) return; // done.
dy1 = -al[1]*y0;
yl[m+1] = 0.0; dyl[m+1] = dy1; // l=m+1
if (lmax==m+1) return; // done.
l=m+2; al+=2;
while (l < lmax) {
y0 = al[0]*y0;
dy1 = al[2]*dy1 - al[3]*y0;
yl[l] = y0; dyl[l] = 0.0;
yl[l+1] = 0.0; dyl[l+1] = dy1;
l+=2; al+=4;
}
if (l==lmax) {
yl[l] = al[0]*y0;
dyl[l] = 0.0;
}
}
/// \internal Precompute constants for the recursive generation of Legendre associated functions, with given normalization.
/// this function is called by \ref shtns_set_size, and assumes up-to-date values in \ref shtns.
/// For the same conventions as GSL, use \c legendre_precomp(shtns, sht_orthonormal, 1, 1.0);
/// \param[in] norm : normalization of the associated legendre functions (\ref shtns_norm).
/// \param[in] with_cs_phase : Condon-Shortley phase (-1)^m is included (1) or not (0)
/// \param[in] mpos_renorm : Optional renormalization for m>0.
/// 1.0 (no renormalization) is the "complex" convention, while 0.5 leads to the "real" convention (with FFTW).
static void legendre_precomp(shtns_cfg shtns, enum shtns_norm norm, int with_cs_phase, double mpos_renorm)
{
double *alm, *blm;
long int im, m, l, lm;
real t1, t2;
#if HAVE_LONG_DOUBLE_WIDER
test_long_double();
#endif
#if SHT_VERBOSE > 1
if (verbose) {
printf(" > Condon-Shortley phase = %d, normalization = %d\n", with_cs_phase, norm);
if (long_double_caps == 3) printf(" > long double has extended precision and large exponent\n");
}
#endif
if (with_cs_phase != 0) with_cs_phase = 1; // force to 1 if !=0
alm = (double *) malloc( (2*NLM)*sizeof(double) );
blm = alm;
if ((norm == sht_schmidt) || (mpos_renorm != 1.0)) {
blm = (double *) malloc( (2*NLM)*sizeof(double) );
}
if ((alm==0) || (blm==0)) shtns_runerr("not enough memory.");
shtns->alm = alm; shtns->blm = blm;
/// - Compute and store the prefactor (independent of x) of the starting value for the recurrence :
/// \f[ Y_m^m(x) = Y_0^0 \ \sqrt{ \prod_{k=1}^{m} \frac{2k+1}{2k} } \ \ (-1)^m \ (1-x^2)^{m/2} \f]
if ((norm == sht_fourpi)||(norm == sht_schmidt)) {
t1 = 1.0;
alm[0] = t1; /// \f$ Y_0^0 = 1 \f$ for Schmidt or 4pi-normalized
} else {
t1 = 0.25L/M_PIl;
alm[0] = SQRT(t1); /// \f$ Y_0^0 = 1/\sqrt{4\pi} \f$ for orthonormal
}
t1 *= mpos_renorm; // renormalization for m>0
for (im=1, m=0; im<=MMAX; ++im) {
while(m<im*MRES) {
++m;
t1 *= ((real)m + 0.5)/m; // t1 *= (m+0.5)/m;
}
t2 = SQRT(t1);
if ( m & with_cs_phase ) t2 = -t2; /// optional \f$ (-1)^m \f$ Condon-Shortley phase.
alm_im(shtns, im)[0] = t2;
}
/// - Precompute the factors alm and blm of the recurrence relation :
#pragma omp parallel for private(im,m,l,lm, t1, t2) schedule(dynamic)
for (im=0; im<=MMAX; ++im) {
m = im*MRES;
lm = im*(2*LMAX - (im-1)*MRES);
if (norm == sht_schmidt) { /// <b> For Schmidt semi-normalized </b>
t2 = SQRT(2*m+1);
alm[lm] /= t2; /// starting value divided by \f$ \sqrt{2m+1} \f$
alm[lm+1] = t2; // l=m+1
lm+=2;
for (l=m+2; l<=LMAX; ++l) {
t1 = SQRT((l+m)*(l-m));
alm[lm+1] = (2*l-1)/t1; /// \f[ a_l^m = \frac{2l-1}{\sqrt{(l+m)(l-m)}} \f]
alm[lm] = - t2/t1; /// \f[ b_l^m = -\sqrt{\frac{(l-1+m)(l-1-m)}{(l+m)(l-m)}} \f]
t2 = t1; lm+=2;
}
} else { /// <b> For orthonormal or 4pi-normalized </b>
t2 = 2*m+1;
// starting value unchanged.
alm[lm+1] = SQRT(2*m+3); // l=m+1
lm+=2;
for (l=m+2; l<=LMAX; ++l) {
t1 = (l+m)*(l-m);
alm[lm+1] = SQRT(((2*l+1)*(2*l-1))/t1); /// \f[ a_l^m = \sqrt{\frac{(2l+1)(2l-1)}{(l+m)(l-m)}} \f]
alm[lm] = - SQRT(((2*l+1)*t2)/((2*l-3)*t1)); /// \f[ b_l^m = -\sqrt{\frac{2l+1}{2l-3}\,\frac{(l-1+m)(l-1-m)}{(l+m)(l-m)}} \f]
t2 = t1; lm+=2;
}
}
/// - Compute analysis recurrence coefficients if necessary
if ((norm == sht_schmidt) || (mpos_renorm != 1.0)) {
lm = im*(2*LMAX - (im-1)*MRES);
t1 = 1.0;
t2 = alm[lm+1];
if (norm == sht_schmidt) {
t1 = 2*m+1;
t2 *= (2*m+3)/t1;
}
if (m>0) t1 /= mpos_renorm;
blm[lm] = alm[lm]*t1;
blm[lm+1] = t2;
lm+=2;
for (l=m+2; l<=LMAX; ++l) {
t1 = alm[lm];
t2 = alm[lm+1];
if (norm == sht_schmidt) {
double t3 = 2*l+1;
t1 *= t3/(2*l-3);
t2 *= t3/(2*l-1);
}
blm[lm] = t1;
blm[lm+1] = t2;
lm+=2;
}
}
}
}
/// \internal returns the value of the Legendre Polynomial of degree l.
/// l is arbitrary, a direct recurrence relation is used, and a previous call to legendre_precomp() is not required.
static double legendre_Pl(const int l, double x)
{
long int i;
real p, p0, p1;
if ((l==0)||(x==1.0)) return ( 1. );
if (x==-1.0) return ( (l&1) ? -1. : 1. );
p0 = 1.0; /// \f$ P_0 = 1 \f$
p1 = x; /// \f$ P_1 = x \f$
for (i=2; i<=l; ++i) { /// recurrence : \f[ l P_l = (2l-1) x P_{l-1} - (l-1) P_{l-2} \f] (works ok up to l=100000)
p = ((x*p1)*(2*i-1) - (i-1)*p0)/i;
p0 = p1;
p1 = p;
}
return ((double) p1);
}
/// \internal Generates the abscissae and weights for a Gauss-Legendre quadrature.
/// Newton's method from an initial guess is used to find the zeros of the Legendre polynomial.
/// \param x = abscissae, \param w = weights, \param n points.
/// \note Reference: Numerical Recipes, Cambridge University Press.
static void gauss_nodes(real *x, real *w, int n)
{
long int i,l,m, k;
real z, z1, p1, p2, p3, pp;
real eps;
eps = 2.3e-16; // desired precision, minimum = 2.2204e-16 (double)
if ((sizeof(eps) > 8) && (long_double_caps > 1)) eps = 1.1e-19; // desired precision, minimum = 1.0842e-19 (long double i387)
m = (n+1)/2;
for (i=1;i<=m;++i) {
k=10; // maximum Newton iteration count to prevent infinite loop.
p1 = M_PIl; p2 = 2*n;
z = (1.0 - (n-1)/(p2*p2*p2)) * COS((p1*(4*i-1))/(4*n+2)); // initial guess
do {
p1 = z; // P_1
p2 = 1.0; // P_0
for(l=2;l<=n;++l) { // recurrence : l P_l = (2l-1) z P_{l-1} - (l-1) P_{l-2} (works ok up to l=100000)
p3 = p2;
p2 = p1;
p1 = ((2*l-1)*z*p2 - (l-1)*p3)/l; // The Legendre polynomial...
}
pp = ((1.-z)*(1.+z))/(n*(p2-z*p1)); // ... and its inverse derivative.
z1 = z;
z -= p1*pp; // Newton's method
} while (( FABS(z-z1) > (z1+z)*0.5*eps ) && (--k > 0));
x[i-1] = z; // Build up the abscissas.
w[i-1] = 2.0*pp*pp/((1.-z)*(1.+z)); // Build up the weights.
x[n-i] = -z;
w[n-i] = w[i-1];
}
if (n&1) x[n/2] = 0.0; // exactly zero.
#if SHT_VERBOSE > 1
// test integral to compute :
if (verbose) {
z = 0;
for (i=0;i<m;++i) {
z += w[i]*x[i]*x[i];
}
#ifndef HAVE_LONG_DOUBLE_WIDER
printf(" Gauss quadrature for 3/2.x^2 = %g (should be 1.0) error = %g\n",z*3.,z*3.-1.0);
#else
printf(" Gauss quadrature for 3/2.x^2 = %Lg (should be 1.0) error = %Lg\n",z*3.,z*3.-1.0);
#endif
}
#endif
// as we started with initial guesses, we should check if the gauss points are actually unique.
for (i=m-1; i>0; i--) {
if (((double) x[i]) == ((double) x[i-1])) shtns_runerr("bad gauss points");
}
}
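/* Usage sketch (hypothetical caller): n-point Gauss-Legendre quadrature of a
 * smooth integrand f over [-1,1] using the nodes and weights computed above
 * (real is long double when HAVE_LONG_DOUBLE_WIDER is set, double otherwise):
 * real x[16], w[16], s = 0.0;
 * gauss_nodes(x, w, 16);
 * for (int i = 0; i < 16; ++i) s += w[i]*f(x[i]); // exact for polynomials of degree <= 31
 */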
|
valid.yolo7.src.h | #pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_512_34_34_256_3_3.h"
#include "gen_ukr_A4B2gemm_1_512_34_34_256_3_3.h"
void testrun(float* A, float* B, float* C, float* oriB) {
int tid = omp_get_thread_num();
int Nx = 34;
int Ny = 34;
int Nh = 3;
long long Astrides[6] = {0,1,2,3,4,5};
int b1 = 0;
for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
}
}
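// The generated nest below computes the convolution as a tiled GEMM: B has
// just been repacked into 16-wide panels via 8x8 AVX transposes, and the
// loops tile channels (c*), output pixels (xy*) and filters (f*) before
// dispatching the 6x2- or 4x2-vector micro-kernels; Astrides is patched
// temporarily whenever a 6-pixel strip crosses a row boundary of the 34x34 output.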
#pragma omp barrier // begin push button generated block
for(int c5=0;c5<256+0;c5+=256)
{
for(int xy5=0;xy5<1156+0;xy5+=1156)
{
for(int f5=0;f5<512+0;f5+=512)
{
for(int xy4=xy5;xy4<min(1156, 1156+xy5);xy4+=1156)
{
for(int f4=f5;f4<min(512, 512+f5);f4+=512)
{
for(int c4=c5;c4<min(256, 256+c5);c4+=256)
{
for(int c3=c4;c3<min(256, 256+c4);c3+=Tc1)
{
for(int xy3=xy4;xy3<min(1156, 1156+xy4);xy3+=Txy3)
{
for(int f3=f4;f3<min(512, 512+f4);f3+=Tf2)
{
for(int xy2=xy3;xy2<min(1156, Txy3+xy3);xy2+=6)
{
for(int f2=f3;f2<min(512, Tf2+f3);f2+=16)
{
for(int c2=c3;c2<min(256, Tc1+c3);c2+=Tc1)
{
for(int c1=c2;c1<min(256, Tc1+c2);c1+=Tc1)
{
for(int xy1=xy2;xy1<min(1156, 6+xy2);xy1+=6)
{
for(int f1=f2;f1<min(512, 16+f2);f1+=16)
{
int ctile=min(Tc1, 256-c1);
int x1=xy1/34;
int y1=xy1%34/1;
int c1_1=c1/1;
int c1_2=c1%1/1;
int kf1_1=f1/16;
int kf1_2=f1%16/1;
int of1_1=f1/1;
int of1_2=f1%1/1;
int offsetA=0+b1*331776+c1_1*1296+1*x1*36+1*y1*1+c1_2*1;
int offsetB=0+kf1_1*36864+c1*144+0*48+0*16+kf1_2*1;
int offsetC=0+b1*591872+of1_1*1156+x1*34+y1*1+of1_2*1;
if(34-y1>=6){
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
else if(34*34-xy1>=6){
for(int sti=34-y1;sti<6;sti+=1)
{
Astrides[sti]+=2;
}
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
for(int sti=34-y1;sti<6;sti+=1)
{
Astrides[sti]-=2;
}
}
else{
cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
// end push button generated block
} |
polybench.c | /**
* polybench.c: This file is part of the PolyBench/C 3.2 test suite.
*
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sched.h>
#include <math.h>
#ifdef _OPENMP
# include <omp.h>
#endif
/* By default, collect PAPI counters on thread 0. */
#ifndef POLYBENCH_THREAD_MONITOR
# define POLYBENCH_THREAD_MONITOR 0
#endif
/* Total LLC cache size, in kB. Default: 32770 (slightly over 32 MB). */
#ifndef POLYBENCH_CACHE_SIZE_KB
# define POLYBENCH_CACHE_SIZE_KB 32770
#endif
int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR;
double polybench_program_total_flops = 0;
#ifdef POLYBENCH_PAPI
# include <papi.h>
# define POLYBENCH_MAX_NB_PAPI_COUNTERS 96
char* _polybench_papi_eventlist[] = {
#include "papi_counters.list"
NULL
};
int polybench_papi_eventset;
int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS];
long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS];
#endif
/* Timer code (gettimeofday). */
double polybench_t_start, polybench_t_end;
/* Timer code (RDTSC). */
unsigned long long int polybench_c_start, polybench_c_end;
static
double rtclock()
{
#ifdef POLYBENCH_TIME
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, NULL);
if (stat != 0)
printf ("Error return from gettimeofday: %d", stat);
return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
#else
return 0;
#endif
}
#ifdef POLYBENCH_CYCLE_ACCURATE_TIMER
static
unsigned long long int rdtsc()
{
unsigned long long int ret = 0;
unsigned int cycles_lo;
unsigned int cycles_hi;
__asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi));
ret = (unsigned long long int)cycles_hi << 32 | cycles_lo;
return ret;
}
#endif
void polybench_flush_cache()
{
int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double);
double* flush = (double*) calloc (cs, sizeof(double));
int i;
double tmp = 0.0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < cs; i++)
tmp += flush[i];
assert (tmp <= 10.0);
free (flush);
}
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
void polybench_linux_fifo_scheduler()
{
/* Use FIFO scheduler to limit OS interference. Program must be run
as root, and this works only for Linux kernels. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);
sched_setscheduler (0, SCHED_FIFO, &schedParam);
}
void polybench_linux_standard_scheduler()
{
/* Restore to standard scheduler policy. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max (SCHED_OTHER);
sched_setscheduler (0, SCHED_OTHER, &schedParam);
}
#endif
#ifdef POLYBENCH_PAPI
static
void test_fail(char *file, int line, char *call, int retval)
{
char buf[128];
memset(buf, '\0', sizeof(buf));
if (retval != 0)
fprintf (stdout,"%-40s FAILED\nLine # %d\n", file, line);
else
{
fprintf (stdout,"%-40s SKIPPED\n", file);
fprintf (stdout,"Line # %d\n", line);
}
if (retval == PAPI_ESYS)
{
sprintf (buf, "System error in %s", call);
perror (buf);
}
else if (retval > 0)
fprintf (stdout,"Error: %s\n", call);
else if (retval == 0)
fprintf (stdout,"Error: %s\n", call);
else
{
char errstring[PAPI_MAX_STR_LEN];
sprintf( errstring, "%d", retval );
PAPI_perror( errstring );
fprintf (stdout,"Error in %s: %s\n", call, errstring);
}
fprintf (stdout,"\n");
if (PAPI_is_initialized ())
PAPI_shutdown ();
exit (1);
}
void polybench_papi_init()
{
# ifdef _OPENMP
#pragma omp parallel
{
#pragma omp master
{
if (omp_get_max_threads () < polybench_papi_counters_threadid)
polybench_papi_counters_threadid = omp_get_max_threads () - 1;
}
#pragma omp barrier
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
polybench_papi_eventset = PAPI_NULL;
if ((retval = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT)
test_fail (__FILE__, __LINE__, "PAPI_library_init", retval);
if ((retval = PAPI_create_eventset (&polybench_papi_eventset))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_create_eventset", retval);
int k;
for (k = 0; _polybench_papi_eventlist[k]; ++k)
{
if ((retval =
PAPI_event_name_to_code (_polybench_papi_eventlist[k],
&(polybench_papi_eventlist[k])))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_event_name_to_code", retval);
}
polybench_papi_eventlist[k] = 0;
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
void polybench_papi_close()
{
# ifdef _OPENMP
#pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
if ((retval = PAPI_destroy_eventset (&polybench_papi_eventset))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_destroy_eventset", retval);
if (PAPI_is_initialized ())
PAPI_shutdown ();
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
int polybench_papi_start_counter(int evid)
{
# ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache();
# endif
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval = 1;
char descr[PAPI_MAX_STR_LEN];
PAPI_event_info_t evinfo;
PAPI_event_code_to_name (polybench_papi_eventlist[evid], descr);
if (PAPI_add_event (polybench_papi_eventset,
polybench_papi_eventlist[evid]) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_add_event", 1);
if (PAPI_get_event_info (polybench_papi_eventlist[evid], &evinfo)
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_get_event_info", retval);
if ((retval = PAPI_start (polybench_papi_eventset)) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_start", retval);
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
return 0;
}
void polybench_papi_stop_counter(int evid)
{
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
long_long values[1];
values[0] = 0;
if ((retval = PAPI_read (polybench_papi_eventset, &values[0]))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_read", retval);
if ((retval = PAPI_stop (polybench_papi_eventset, NULL)) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_stop", retval);
polybench_papi_values[evid] = values[0];
if ((retval = PAPI_remove_event
(polybench_papi_eventset,
polybench_papi_eventlist[evid])) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_remove_event", retval);
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
void polybench_papi_print()
{
int verbose = 0;
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num() == polybench_papi_counters_threadid)
{
#ifdef POLYBENCH_PAPI_VERBOSE
verbose = 1;
#endif
if (verbose)
printf ("On thread %d:\n", polybench_papi_counters_threadid);
#endif
int evid;
for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid)
{
if (verbose)
printf ("%s=", _polybench_papi_eventlist[evid]);
printf ("%llu ", polybench_papi_values[evid]);
if (verbose)
printf ("\n");
}
printf ("\n");
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
#endif
/* ! POLYBENCH_PAPI */
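/* Usage sketch (illustrative only, with POLYBENCH_PAPI defined): each counter
   is measured in a separate run of the kernel under test, e.g.

       int evid;
       polybench_papi_init ();
       for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid)
         {
           polybench_papi_start_counter (evid);
           kernel ();                // hypothetical kernel under test
           polybench_papi_stop_counter (evid);
         }
       polybench_papi_print ();
       polybench_papi_close ();
*/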
void polybench_prepare_instruments()
{
#ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_fifo_scheduler ();
#endif
}
void polybench_timer_start()
{
polybench_prepare_instruments ();
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
polybench_t_start = rtclock ();
#else
polybench_c_start = rdtsc ();
#endif
}
void polybench_timer_stop()
{
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
polybench_t_end = rtclock ();
#else
polybench_c_end = rdtsc ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_standard_scheduler ();
#endif
}
void polybench_timer_print()
{
#ifdef POLYBENCH_GFLOPS
if (polybench_program_total_flops == 0)
{
printf ("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n");
printf ("%0.6lf\n", polybench_t_end - polybench_t_start);
}
else
printf ("%0.2lf\n",
(polybench_program_total_flops /
(double)(polybench_t_end - polybench_t_start)) / 1000000000);
#else
# ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
printf ("%0.6f\n", polybench_t_end - polybench_t_start);
# else
printf ("%Ld\n", polybench_c_end - polybench_c_start);
# endif
#endif
}
static
void *
xmalloc (size_t num)
{
void* new = NULL;
int ret = posix_memalign (&new, 32, num);
if (! new || ret)
{
fprintf (stderr, "[PolyBench] posix_memalign: cannot allocate memory\n");
exit (1);
}
return new;
}
void* polybench_alloc_data(unsigned long long int n, int elt_size)
{
/// FIXME: detect overflow!
size_t val = n;
val *= elt_size;
void* ret = xmalloc (val);
return ret;
}
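/* Usage sketch (illustrative only): a benchmark built with -DPOLYBENCH_TIME
   typically wraps its kernel as

       polybench_timer_start ();   // flushes the cache first
       kernel ();                  // hypothetical kernel under test
       polybench_timer_stop ();
       polybench_timer_print ();   // seconds here, cycles with the RDTSC timer
*/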
|
for_schedule_static.c | /*
*
* For static scheduling we check whether the chunks have the requested size,
* with the legal exception of the last chunk.
* Modified by Chunhua Liao
*/
#include <stdio.h>
#include <omp.h>
#include <unistd.h>
#include <stdlib.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
#define CFSMAX_SIZE 1000
/****************************************************************/
int
check_for_schedule_static (FILE * logFile)
{
const int chunk_size = 7;
int threads;
int tids[CFSMAX_SIZE];
int i;
int result = 0;
int ii = 0;
int tid;
#pragma omp parallel
{ /* begin of parallel */
#pragma omp single
{
threads = omp_get_num_threads ();
}
} /* end of parallel */
if (threads < 2)
{
printf ("This test only works with at least two threads");
fprintf (logFile, "This test only works with at least two threads");
return 0;
}
else
{
fprintf (logFile,
"Using an internal count of %d\nUsing a specified chunksize of %d\n",
CFSMAX_SIZE, chunk_size);
#pragma omp parallel shared(tids) private(tid)
{ /* begin of parallel */
tid = omp_get_thread_num ();
#pragma omp for schedule(static,chunk_size)
for (i = 0; i < CFSMAX_SIZE; i++)
{
tids[i] = tid;
}
} /* end of parallel */
/*
printf("debug---------------------\n");
for (i=0;i<CFSMAX_SIZE -1;i++)
printf("%d ",tids[i]);
printf("End of debug---------------------\n");
*/
for (i = 0; i < CFSMAX_SIZE; ++i)
{
ii= (i/chunk_size) % threads; /*round-robin for static chunk*/
if (tids[i] != ii)
{
result++;
fprintf (logFile,
"Iteration %d should be assigned to %d instead of %d\n",
i,ii,tids[i]);
}
}
/*printf("Alles OK beim Test von schedule(static)\n"); */
return (result==0);
}
}
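/* Worked example (illustrative): with chunk_size = 7 and 3 threads, static
   scheduling assigns iterations 0..6 to thread 0, 7..13 to thread 1,
   14..20 to thread 2, 21..27 to thread 0 again, and so on; this is exactly
   the (i / chunk_size) % threads round-robin mapping verified above. */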
/****************************************************************/
int
crosscheck_for_schedule_static (FILE * logFile)
{
const int chunk_size = 7;
int threads;
int tids[CFSMAX_SIZE];
int i;
int result = 0;
int ii = 0;
int tid;
#pragma omp parallel
{ /* begin of parallel */
#pragma omp single
{
threads = omp_get_num_threads ();
}
} /* end of parallel */
if (threads < 2)
{
printf ("This test only works with at least two threads");
fprintf (logFile, "This test only works with at least two threads");
return 0;
}
else
{
fprintf (logFile,
"Using an internal count of %d\nUsing a specified chunksize of %d\n",
CFSMAX_SIZE, chunk_size);
#pragma omp parallel shared(tids) private(tid)
{ /* begin of parallel */
tid = omp_get_thread_num ();
#pragma omp for
for (i = 0; i < CFSMAX_SIZE; i++)
{
tids[i] = tid;
}
} /* end of parallel */
/*
printf("debug---------------------\n");
for (i=0;i<CFSMAX_SIZE -1;i++)
printf("%d ",tids[i]);
printf("End of debug---------------------\n");
*/
for (i = 0; i < CFSMAX_SIZE; ++i)
{
ii= (i/chunk_size) % threads; /*round-robin for static chunk*/
if (tids[i] != ii)
{
result++;
fprintf (logFile,
"Iteration %d should be assigned to %d instead of %d\n",
i,ii,tids[i]);
}
}
/*printf("Alles OK beim Test von schedule(static)\n"); */
return (result==0);
}
}
|
test.c |
#include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (1024*3)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i+1;})
#define ZERO(X) ZERO_ARRAY(N, X)
#define DUMP_SUCCESS9() { \
DUMP_SUCCESS(gpu_threads-max_threads); \
DUMP_SUCCESS(gpu_threads-max_threads); \
DUMP_SUCCESS(gpu_threads-max_threads); \
DUMP_SUCCESS(gpu_threads-max_threads); \
DUMP_SUCCESS(gpu_threads-max_threads); \
DUMP_SUCCESS(gpu_threads-max_threads); \
DUMP_SUCCESS(gpu_threads-max_threads); \
DUMP_SUCCESS(gpu_threads-max_threads); \
DUMP_SUCCESS(gpu_threads-max_threads); \
}
//
// FIXME:
// Add support for 'shared', 'lastprivate'
//
int main(void) {
check_offloading();
double A[N], B[N], C[N], D[N], E[N];
double S[N];
double p[2];
int cpuExec = 0;
#pragma omp target map(tofrom: cpuExec)
{
cpuExec = omp_is_initial_device();
}
int gpu_threads = 224;
int cpu_threads = 32;
int max_threads = cpuExec ? cpu_threads : gpu_threads;
INIT();
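// Expected checksum (derivation): with C[i]=1, D[i]=i and E[i]=-i+1, each
// iteration contributes A[i]+B[i]-1 = (1+i)+(1)-1 = i+1, so the sum over
// i=0..N-1 is N*(N+1)/2, i.e. the N/2*(N+1) expected by VERIFY below.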
//
// Test: proc_bind clause
//
#undef TARGET_PARALLEL_FOR_CLAUSES
#define TARGET_PARALLEL_FOR_CLAUSES proc_bind(master)
#include "tpf_defines.h"
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
TARGET_PARALLEL_FOR1(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR2(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR3(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR4(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR5(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR6(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR7(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR8(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR9(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
}
DUMP_SUCCESS9()
#undef TARGET_PARALLEL_FOR_CLAUSES
#define TARGET_PARALLEL_FOR_CLAUSES proc_bind(close)
#include "tpf_defines.h"
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
TARGET_PARALLEL_FOR1(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR2(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR3(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR4(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR5(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR6(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR7(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR8(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR9(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
}
DUMP_SUCCESS9()
#undef TARGET_PARALLEL_FOR_CLAUSES
#define TARGET_PARALLEL_FOR_CLAUSES proc_bind(spread)
#include "tpf_defines.h"
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
TARGET_PARALLEL_FOR1(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR2(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR3(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR4(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR5(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR6(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR7(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR8(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR9(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
}
DUMP_SUCCESS9()
//
// Test: private, shared clauses on omp target parallel for.
//
#undef TARGET_PARALLEL_FOR_CLAUSES
#define TARGET_PARALLEL_FOR_CLAUSES private(p,q) shared(A,B,C,D,E)
#include "tpf_defines.h"
// FIXME: shared(a) where 'a' is an implicitly mapped scalar does not work.
// FIXME: shared(A) private(A) does not generate correct results.
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
TARGET_PARALLEL_FOR1(
double p = 2; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p = C[i] + D[i]; \
q = D[i] + E[i]; \
A[i] += p; \
B[i] += q; \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
TARGET_PARALLEL_FOR2(
double p = 2; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p = C[i] + D[i]; \
q = D[i] + E[i]; \
A[i] += p; \
B[i] += q; \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
TARGET_PARALLEL_FOR3(
double p = 2; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p = C[i] + D[i]; \
q = D[i] + E[i]; \
A[i] += p; \
B[i] += q; \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
TARGET_PARALLEL_FOR4(
double p = 2; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p = C[i] + D[i]; \
q = D[i] + E[i]; \
A[i] += p; \
B[i] += q; \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
TARGET_PARALLEL_FOR5(
double p = 2; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p = C[i] + D[i]; \
q = D[i] + E[i]; \
A[i] += p; \
B[i] += q; \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
TARGET_PARALLEL_FOR6(
double p = 2; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p = C[i] + D[i]; \
q = D[i] + E[i]; \
A[i] += p; \
B[i] += q; \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
TARGET_PARALLEL_FOR7(
double p = 2; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p = C[i] + D[i]; \
q = D[i] + E[i]; \
A[i] += p; \
B[i] += q; \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
TARGET_PARALLEL_FOR8(
double p = 2; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p = C[i] + D[i]; \
q = D[i] + E[i]; \
A[i] += p; \
B[i] += q; \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
TARGET_PARALLEL_FOR9(
double p = 2; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p = C[i] + D[i]; \
q = D[i] + E[i]; \
A[i] += p; \
B[i] += q; \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
}
DUMP_SUCCESS9()
//
// Test: firstprivate clause on omp target parallel for.
//
#undef TARGET_PARALLEL_FOR_CLAUSES
#define TARGET_PARALLEL_FOR_CLAUSES firstprivate(p,q)
#include "tpf_defines.h"
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
TARGET_PARALLEL_FOR1(
double p = -4; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i] + p; \
B[i] += D[i] + E[i] + q; \
if (i == N-1) { \
p += 6; \
q += 9; \
} \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR2(
double p = -4; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i] + p; \
B[i] += D[i] + E[i] + q; \
if (i == N-1) { \
p += 6; \
q += 9; \
} \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR3(
double p = -4; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i] + p; \
B[i] += D[i] + E[i] + q; \
if (i == N-1) { \
p += 6; \
q += 9; \
} \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR4(
double p = -4; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i] + p; \
B[i] += D[i] + E[i] + q; \
if (i == N-1) { \
p += 6; \
q += 9; \
} \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR5(
double p = -4; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i] + p; \
B[i] += D[i] + E[i] + q; \
if (i == N-1) { \
p += 6; \
q += 9; \
} \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR6(
double p = -4; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i] + p; \
B[i] += D[i] + E[i] + q; \
if (i == N-1) { \
p += 6; \
q += 9; \
} \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR7(
double p = -4; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i] + p; \
B[i] += D[i] + E[i] + q; \
if (i == N-1) { \
p += 6; \
q += 9; \
} \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR8(
double p = -4; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i] + p; \
B[i] += D[i] + E[i] + q; \
if (i == N-1) { \
p += 6; \
q += 9; \
} \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
TARGET_PARALLEL_FOR9(
double p = -4; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i] + p; \
B[i] += D[i] + E[i] + q; \
if (i == N-1) { \
p += 6; \
q += 9; \
} \
},
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N/2*(N+1)))
}
DUMP_SUCCESS9()
#if 0
//
// Test: lastprivate clause on omp target parallel for.
//
#undef TARGET_PARALLEL_FOR_CLAUSES
#define TARGET_PARALLEL_FOR_CLAUSES lastprivate(q)
#include "tpf_defines.h"
// FIXME: after the bug fix, modify to t=1 and update tpf_defines.h to use the host.
// FIXME: variable is not private.
for (int t = 2; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
TARGET_PARALLEL_FOR1(
double p[1]; \
double q[1]; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p[0] = C[i] + D[i]; \
q[0] = D[i] + E[i]; \
A[i] = p[0]; \
B[i] = q[0]; \
},
{
double tmp = p[0] + q[0];
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], N+1+ N/2*(N+1)))
}
// FIXME: private of non-scalar does not work.
//
// Test: private clause on omp parallel for.
//
#undef PARALLEL_FOR_CLAUSES
#define PARALLEL_FOR_CLAUSES private(p)
#include "tpf_defines.h"
for (int t = 0; t <= 224; t++) {
int threads[1]; threads[0] = t;
PARALLEL_FOR(
p[0] = 2; p[1] = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p[0] = C[i] + D[i]; \
p[1] = D[i] + E[i]; \
A[i] += p[0]; \
B[i] += p[1]; \
}
,
{
double tmp = p[0] + p[1];
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i];
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], 6 + SUMS * (N/2*(N+1))))
}
// FIXME: private of non-scalar does not work.
//
// Test: firstprivate clause on omp parallel for.
//
#undef PARALLEL_FOR_CLAUSES
#define PARALLEL_FOR_CLAUSES firstprivate(p)
#include "tpf_defines.h"
for (int t = 0; t <= 224; t++) {
int threads[1]; threads[0] = t;
PARALLEL_FOR(
p[0] = -4; p[1] = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i] + p[0]; \
B[i] += D[i] + E[i] + p[1]; \
if (i == N-1) { \
p[0] += 6; \
p[1] += 9; \
} \
}
,
{
double tmp = p[0] + p[1];
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i];
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], SUMS * (N/2*(N+1))))
}
#endif
//
// Test: collapse clause on omp target parallel for.
//
#undef TARGET_PARALLEL_FOR_CLAUSES
#define TARGET_PARALLEL_FOR_CLAUSES collapse(2)
#include "tpf_defines.h"
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
TARGET_PARALLEL_FOR1(
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < 1024; i++) { \
for (int j = 0; j < 3; j++) { \
A[i*3+j] += C[i*3+j] + D[i*3+j]; \
B[i*3+j] += D[i*3+j] + E[i*3+j]; \
} \
}
,
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], (N/2*(N+1))))
TARGET_PARALLEL_FOR2(
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < 1024; i++) { \
for (int j = 0; j < 3; j++) { \
A[i*3+j] += C[i*3+j] + D[i*3+j]; \
B[i*3+j] += D[i*3+j] + E[i*3+j]; \
} \
}
,
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], (N/2*(N+1))))
TARGET_PARALLEL_FOR3(
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < 1024; i++) { \
for (int j = 0; j < 3; j++) { \
A[i*3+j] += C[i*3+j] + D[i*3+j]; \
B[i*3+j] += D[i*3+j] + E[i*3+j]; \
} \
}
,
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], (N/2*(N+1))))
TARGET_PARALLEL_FOR4(
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < 1024; i++) { \
for (int j = 0; j < 3; j++) { \
A[i*3+j] += C[i*3+j] + D[i*3+j]; \
B[i*3+j] += D[i*3+j] + E[i*3+j]; \
} \
}
,
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], (N/2*(N+1))))
TARGET_PARALLEL_FOR5(
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < 1024; i++) { \
for (int j = 0; j < 3; j++) { \
A[i*3+j] += C[i*3+j] + D[i*3+j]; \
B[i*3+j] += D[i*3+j] + E[i*3+j]; \
} \
}
,
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], (N/2*(N+1))))
TARGET_PARALLEL_FOR6(
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < 1024; i++) { \
for (int j = 0; j < 3; j++) { \
A[i*3+j] += C[i*3+j] + D[i*3+j]; \
B[i*3+j] += D[i*3+j] + E[i*3+j]; \
} \
}
,
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], (N/2*(N+1))))
TARGET_PARALLEL_FOR7(
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < 1024; i++) { \
for (int j = 0; j < 3; j++) { \
A[i*3+j] += C[i*3+j] + D[i*3+j]; \
B[i*3+j] += D[i*3+j] + E[i*3+j]; \
} \
}
,
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], (N/2*(N+1))))
TARGET_PARALLEL_FOR8(
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < 1024; i++) { \
for (int j = 0; j < 3; j++) { \
A[i*3+j] += C[i*3+j] + D[i*3+j]; \
B[i*3+j] += D[i*3+j] + E[i*3+j]; \
} \
}
,
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], (N/2*(N+1))))
TARGET_PARALLEL_FOR9(
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < 1024; i++) { \
for (int j = 0; j < 3; j++) { \
A[i*3+j] += C[i*3+j] + D[i*3+j]; \
B[i*3+j] += D[i*3+j] + E[i*3+j]; \
} \
}
,
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i] - 1;
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], (N/2*(N+1))))
}
DUMP_SUCCESS9()
//
// Test: Ensure coalesced scheduling on GPU.
//
if (cpuExec == 0) {
#undef TARGET_PARALLEL_FOR_CLAUSES
#define TARGET_PARALLEL_FOR_CLAUSES
#include "tpf_defines.h"
int threads[1]; threads[0] = 32;
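// Expected value (derivation): with coalesced scheduling, thread t handles
// i = t, t+32 and t+64, so A[i] = i - t equals 0, 32 and 64 on the three
// strips; the total is 0*32 + 32*32 + 64*32, i.e. the (32*32 + 64*32) in VERIFY.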
TARGET_PARALLEL_FOR1(
S[0] = 0; \
for (int i = 0; i < 96; i++) { \
A[i] = 0; \
} \
,
for (int i = 0; i < 96; i++) { \
A[i] += i - omp_get_thread_num(); \
}
,
{
double tmp = 0;
for (int i = 0; i < 96; i++) {
tmp += A[i];
}
S[0] = tmp;
},
VERIFY(0, 1, S[0], (32*32 + 64*32) ))
TARGET_PARALLEL_FOR2(
S[0] = 0; \
for (int i = 0; i < 96; i++) { \
A[i] = 0; \
} \
,
for (int i = 0; i < 96; i++) { \
A[i] += i - omp_get_thread_num(); \
}
,
{
double tmp = 0;
for (int i = 0; i < 96; i++) {
tmp += A[i];
}
S[0] = tmp;
},
VERIFY(0, 1, S[0], (32*32 + 64*32) ))
TARGET_PARALLEL_FOR7(
S[0] = 0; \
for (int i = 0; i < 96; i++) { \
A[i] = 0; \
} \
,
for (int i = 0; i < 96; i++) { \
A[i] += i - omp_get_thread_num(); \
}
,
{
double tmp = 0;
for (int i = 0; i < 96; i++) {
tmp += A[i];
}
S[0] = tmp;
},
VERIFY(0, 1, S[0], (32*32 + 64*32) ))
} else {
DUMP_SUCCESS(3);
}
return 0;
}
|
texcoord_optimization.h | // to be included in the library
#ifndef __VCGLIB__TEXTCOOORD_OPTIMIZATION
#define __VCGLIB__TEXTCOOORD_OPTIMIZATION
#include <vcg/container/simple_temporary_data.h>
#ifdef _USE_OMP
#include <omp.h>
#endif
/*
SINGLE PATCH TEXTURE OPTIMIZATIONS
A set of classes to perform optimizations of disk->disk parametrizations.
Requires texture coords to be defined per vertex (do replicate seams!).
*/
namespace vcg
{
namespace tri
{
// helper function (checks that coords are inside -1..+1)
template <class ScalarType>
bool testParamCoordsPoint(const vcg::Point2<ScalarType> &p)
{
ScalarType eps=(ScalarType)0.00001;
if (!((p.X()>=-1-eps)&&(p.X()<=1+eps)&&
(p.Y()>=-1-eps)&&(p.Y()<=1+eps)))
return (false);
return true;
}
/* Base class for all Texture Optimizers*/
template<class MESH_TYPE>
class TexCoordOptimization{
protected:
MESH_TYPE &m;
SimpleTempData<typename MESH_TYPE::VertContainer, int > isFixed;
public:
/* Types */
typedef MESH_TYPE MeshType;
typedef typename MESH_TYPE::VertexIterator VertexIterator;
typedef typename MESH_TYPE::FaceIterator FaceIterator;
typedef typename MESH_TYPE::VertexType VertexType;
typedef typename MESH_TYPE::FaceType FaceType;
typedef typename MESH_TYPE::ScalarType ScalarType;
/* Access functions */
const MeshType & Mesh() const {return m;}
MeshType & Mesh() {return m;}
/* Constructor */
TexCoordOptimization(MeshType &_m):m(_m),isFixed(_m.vert){
/* assert(m.HasPerVertexTexture());*/
}
// initializes on current geometry
virtual void TargetCurrentGeometry()=0;
// performs an iteration. Returns the largest movement.
virtual ScalarType Iterate()=0;
// performs an iteration (faster, but it does not tell how close it is to stopping)
virtual void IterateBlind()=0;
// performs <step> iterations
virtual ScalarType IterateN(int step){
for (int i=0; i<step-1; i++) {
this->IterateBlind();
}
if (step>0) return this->Iterate(); else return 0;
}
// performs iterations until convergence.
virtual int IterateUntilConvergence(ScalarType threshold=0.0001, int maxite=5000){
int i=0;
while (Iterate()>threshold) {
if (i++>maxite) return i;
}
return i;
}
// destructor: free temporary field
~TexCoordOptimization(){
/*isFixed.Stop();*/
};
// set the current border as fixed (forced to stay in place during texture optimization)
void SetBorderAsFixed(){
/*isFixed.Start();*/
for (VertexIterator v=m.vert.begin(); v!=m.vert.end(); v++) {
isFixed[v]=(v->IsB())?1:0;
}
}
// everything moves: no vertex is kept fixed during texture optimization
void SetNothingAsFixed(){
//isFixed.Start();
for (VertexIterator v=m.vert.begin(); v!=m.vert.end(); v++) {
isFixed[v]=0;
}
}
// fix a given vertex
void FixVertex(const VertexType *v, bool fix=true){
isFixed[v]=(fix)?1:0;
}
bool IsFixed(const VertexType *v)
{
return (isFixed[v]);
}
bool Fixed(const FaceType* f)
{
return ((isFixed[f->V(0)])&&(isFixed[f->V(1)])&&(isFixed[f->V(2)]));
}
virtual void SetSpeed(ScalarType){
//assert(0); // by default, no speed to set
}
virtual ScalarType GetSpeed(){
//assert(0); // by default, no speed to set
return 0;
}
virtual void SetTheta(int){
assert(0);
}
virtual int GetTheta(){
assert(0);
return 0;
}
};
/*
AREA PRESERVING TEXTURE OPTIMIZATION
as in: Degener, P., Meseth, J., Klein, R.
"An adaptable surface parameterization method."
Proc. of the 12th International Meshing Roundtable, 201-213 [2003].
Features:
:) - Balances angle and area distortions (best results!).
:) - Can choose how to balance area and angle preservation (see SetTheta)
theta=0 -> pure conformal (use MIPS instead!)
theta=3 -> good balance between area and angle preservation
theta>3 -> care more about area than about angles
:( - Slowest method.
:( - Requires a fixed boundary, else expands forever in texture space (unless theta=0).
:( - Diverges in presence of flipped faces (unless theta=0).
:( - Requires a speed parameter to be set.
Speed too large => when close, bounces back and forth around the minimum, without getting any closer.
Lower speed => longer convergence times
*/
template<class MESH_TYPE>
class AreaPreservingTexCoordOptimization:public TexCoordOptimization<MESH_TYPE>{
public:
/* Types */
typedef MESH_TYPE MeshType;
typedef typename MESH_TYPE::VertexIterator VertexIterator;
typedef typename MESH_TYPE::FaceIterator FaceIterator;
typedef typename MESH_TYPE::VertexType VertexType;
typedef typename MESH_TYPE::FaceType FaceType;
typedef typename MESH_TYPE::ScalarType ScalarType;
typedef typename MESH_TYPE::CoordType CoordType;
private:
typedef TexCoordOptimization<MESH_TYPE> Super; // superclass (for convenience)
// extra data per face: [0..2] -> cotangents, [3] -> area*2
SimpleTempData<typename MESH_TYPE::FaceContainer, Point4<ScalarType> > data;
SimpleTempData<typename MESH_TYPE::VertContainer, Point2<ScalarType> > sum;
std::vector<CoordType> sumX;
std::vector<CoordType> sumY;
SimpleTempData<typename MESH_TYPE::VertContainer, Point2<ScalarType> > lastDir;
/*SimpleTempData<typename MESH_TYPE::VertContainer,omp_lock_t> lck;*/
SimpleTempData<typename MESH_TYPE::VertContainer, ScalarType > vSpeed;
ScalarType totArea;
ScalarType speed;
int theta;
public:
// constructor and destructor
AreaPreservingTexCoordOptimization(MeshType &_m):Super(_m),data(_m.face),sum(_m.vert),lastDir(_m.vert),vSpeed(_m.vert,1){
speed=(ScalarType)0.00005;
theta=3;
/*for (int i=0;i<m.vert.size();i++)
omp_init_lock(&lck[i]);*/
}
~AreaPreservingTexCoordOptimization(){
/* data.Stop();
sum.Stop();
Super::isFixed.Stop();*/
}
void SetSpeed(ScalarType _speed){
speed=_speed;
}
ScalarType GetSpeed(){
return speed;
}
// sets the parameter theta:
// good parameters are in 1..3
// 0 = converge to pure conformal, ignore area preservation
// 3 = good balance between area and conformal
// >3 = area more important, angle preservation less important
void SetTheta(int _theta){
theta=_theta;
}
int GetTheta(){
return theta;
}
void IterateBlind(){
/* todo: same as Iterate(), but without computing the return value */
Iterate();
}
ScalarType Area(int i)
{
FaceType *f=&(Super::m.face[i]);
double val=0;
if (!(Super::isFixed[f->V(0)]&& Super::isFixed[f->V(1)] && Super::isFixed[f->V(2)]))
val=(f->V(1)->T().P()-f->V(0)->T().P())^(f->V(2)->T().P()-f->V(0)->T().P());
/* bool b0=testParamCoords(f->V(0));
bool b1=testParamCoords(f->V(1));
bool b2=testParamCoords(f->V(2));*/
if(!((fabs(val)<3.14)&&(fabs(val)>=0.0)))
{
printf("v0 %lf,%lf \n",f->V(0)->T().U(),f->V(0)->T().V());
printf("v1 %lf,%lf \n",f->V(1)->T().U(),f->V(1)->T().V());
printf("v2 %lf,%lf \n",f->V(2)->T().U(),f->V(2)->T().V());
printf("Area Value %lf \n",val);
//system("pause");
}
return fabs(val);
}
void InitSum()
{
int k;
int n=Super::m.vert.size();
int n1=Super::m.face.size();
#ifdef _USE_OMP
#pragma omp parallel for default (none) shared(n) private(k)
#endif
for (k=0;k<n;k++)
{
sum[k]=Point2<ScalarType>(0,0);
}
#ifdef _USE_OMP
#pragma omp parallel for default (none) shared(n1) private(k)
#endif
for (k=0;k<n1;k++)
{
sumX[k].X()=0;
sumX[k].Y()=0;
sumX[k].Z()=0;
sumY[k].X()=0;
sumY[k].Y()=0;
sumY[k].Z()=0;
}
#ifdef _USE_OMP
#pragma omp barrier
#endif
}
ScalarType getProjArea()
{
int k;
int n=Super::m.face.size();
ScalarType tot_proj_area=0;
//# pragma omp parallel for
#ifdef _USE_OMP
#pragma omp parallel for default (none) shared(n) private(k) reduction(+: tot_proj_area)
#endif
for (k=0;k<n; k++) {
tot_proj_area+=Area(k);
}
#ifdef _USE_OMP
#pragma omp barrier
#endif
return (tot_proj_area);
}
vcg::Point2<ScalarType> VertValue(const int &face,const int &vert,const double &scale)
{
FaceType *f=&Super::m.face[face];
/*int i=0;*/
vcg::Point2<ScalarType> t0=(f->V0(vert)->T().P());
vcg::Point2<ScalarType> t1=(f->V1(vert)->T().P());
vcg::Point2<ScalarType> t2=(f->V2(vert)->T().P());
ScalarType area2 = fabs((t1-t0) ^ (t2-t0));
ScalarType a = (t1-t0).Norm(),
b = ((t1-t0) * (t2-t0))/a,
c = area2 / a,
m0= data[face][vert] / area2,
m1= data[face][(vert+1)%3] / area2,
m2= data[face][(vert+2)%3] / area2,
mx= (b-a)/area2,
my= c/area2, // 1.0/a
mA= data[face][3]/area2* scale,
e = m0*((b-a)*(b-a)+c*c) + m1*(b*b+c*c) + m2*a*a, // as obvious
M1= mA + 1.0/mA,
M2= mA - 1.0/mA,
px= e*my,
py=-e*mx,
qx= m1*b+ m2*a,
qy= m1*c,
dx=pow(M1,theta-1)
*(px*(M1+ theta*M2) - 2.0*qx*M1),
dy=pow(M1,theta-1)
*(py*(M1+ theta*M2) - 2.0*qy*M1),
gy= dy/c,
gx= (dx - gy*b) / a;
// 3d gradient
Point2<ScalarType> val=( (t1-t0) * gx + (t2-t0) * gy ) * data[face][3];
return val;
}
void UpdateSum(const double &scale)
{
int n=Super::m.face.size();
int k;
FaceType *f;
ScalarType myscale=scale;
vcg::Point2<ScalarType> val0,val1,val2;
#ifdef _USE_OMP
#pragma omp parallel for default (none) shared(n,myscale) private(k,f,val0,val1,val2)
#endif
for (k=0;k<n; k++) {
f=&Super::m.face[k];
val0=VertValue(k,0,myscale);
val1=VertValue(k,1,myscale);
val2=VertValue(k,2,myscale);
sumX[k].V(0)=val0.X();
sumX[k].V(1)=val1.X();
sumX[k].V(2)=val2.X();
sumY[k].V(0)=val0.Y();
sumY[k].V(1)=val1.Y();
sumY[k].V(2)=val2.Y();
}
#ifdef _USE_OMP
#pragma omp barrier
#endif
}
void SumVertex()
{
for (unsigned int j=0; j<Super::m.face.size(); j++)
{
for (int i=0;i<3;i++)
{
VertexType *v=Super::m.face[j].V(i);
sum[v].X()+=sumX[j].V(i);
sum[v].Y()+=sumY[j].V(i);
}
}
}
ScalarType Iterate(){
InitSum();
ScalarType tot_proj_area=getProjArea();
double scale= tot_proj_area / totArea ;
UpdateSum(scale);
ScalarType max=0; // max displacement
// #pragma omp parallel for num_threads(4)
// for (VertexIterator v=Super::m.vert.begin(); v!=Super::m.vert.end(); v++)
/*omp_lock_t lck0;
omp_init_lock(&lck0);*/
SumVertex();
//#pragma omp parallel for
for (unsigned int j=0; j<Super::m.vert.size(); j++)
{
VertexType *v=&Super::m.vert[j];
if ( !Super::isFixed[v] ) //if (!v->IsB())
{
ScalarType n=sum[v].Norm();
//printf("N %f \n",n);
if ( n > 1 ) { sum[v]/=n; n=1.0;}
if (lastDir[v]*sum[v]<0) vSpeed[v]*=(ScalarType)0.85; else vSpeed[v]/=(ScalarType)0.92;
lastDir[v]= sum[v];
/* if ( n*speed<=0.1 );
{*/
vcg::Point2f goal=v->T().P()-(sum[v] * (speed * vSpeed[v]) );
bool isOK=testParamCoordsPoint<ScalarType>(goal);
if (isOK)
v->T().P()-=(sum[v] * (speed * vSpeed[v]) );
n=n*speed * vSpeed[v];
//#pragma omp critical
max=std::max(max,n);
/* }*/
}
}
return max;
}
void TargetCurrentGeometry(){
/* Super::isFixed.Start();
data.Start();
sum.Start();*/
sumX.resize(Super::m.face.size());
sumY.resize(Super::m.face.size());
totArea=0;
for (FaceIterator f=Super::m.face.begin(); f!=Super::m.face.end(); f++) {
double area2 = ((f->V(1)->P() - f->V(0)->P() )^(f->V(2)->P() - f->V(0)->P() )).Norm();
totArea+=area2;
//if ( Super::isFixed[f->V1(0)] )
for (int i=0; i<3; i++){
data[f][i]=(
(f->V1(i)->P() - f->V0(i)->P() )*(f->V2(i)->P() - f->V0(i)->P() )
)/area2;
data[f][3]=area2;
}
}
}
};
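/* Usage sketch (illustrative only; "MyMesh" stands for a hypothetical vcg
   mesh type with per-vertex texture coords):

     AreaPreservingTexCoordOptimization<MyMesh> opt(mesh);
     opt.SetBorderAsFixed();         // the method requires a fixed boundary
     opt.TargetCurrentGeometry();
     opt.SetTheta(3);                // balance between area and angle preservation
     opt.SetSpeed(0.00005);
     opt.IterateUntilConvergence();
*/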
/*
MIPS TEXTURE OPTIMIZATION
Features:
:( - Targets angle distortions only (not ideal for texture mapping).
:) - Quite fast.
:) - Does not require fixed boundary (will auto-find a boundary -- up to a scale).
:) - Tends to nicely heal flipped faces
:( - Requires a speed parameter to be set
(SHOULD NOT BE LIKE THIS. BETTER IMPLEMENTATION NEEDED).
Speed too large => when close, bounces back and forth around the minimum, without getting any closer.
Lower speed => longer convergence times
*/
template<class MESH_TYPE>
class MIPSTexCoordOptimization:public TexCoordOptimization<MESH_TYPE>{
public:
/* Types */
typedef MESH_TYPE MeshType;
typedef typename MESH_TYPE::VertexIterator VertexIterator;
typedef typename MESH_TYPE::FaceIterator FaceIterator;
typedef typename MESH_TYPE::VertexType VertexType;
typedef typename MESH_TYPE::FaceType FaceType;
typedef typename MESH_TYPE::ScalarType ScalarType;
protected:
typedef TexCoordOptimization<MESH_TYPE> Super; // superclass (for convenience)
// extra data per face: [0..3] -> cotangents.
SimpleTempData<typename MESH_TYPE::FaceContainer, Point3<ScalarType> > data;
SimpleTempData<typename MESH_TYPE::VertContainer, Point2<ScalarType> > sum;
ScalarType totArea;
ScalarType speed;
public:
// constructor and destructor
MIPSTexCoordOptimization(MeshType &_m):Super(_m),data(_m.face),sum(_m.vert){
speed=(ScalarType)0.001;
}
~MIPSTexCoordOptimization(){
/* data.Stop();
sum.Stop();
Super::isFixed.Stop();*/
}
void SetSpeed(ScalarType _speed){
speed=_speed;
}
ScalarType GetSpeed(){
return speed;
}
void IterateBlind(){
/* todo: same as Iterate(), but without computing the return value */
Iterate();
}
ScalarType Iterate(){
#define v0 (f->V(0)->T().P())
#define v1 (f->V(1)->T().P())
#define v2 (f->V(2)->T().P())
#define vi (f->V(i)->T().P())
#define vj (f->V(j)->T().P())
#define vk (f->V(k)->T().P())
for (VertexIterator v=Super::m.vert.begin(); v!=Super::m.vert.end(); v++) {
//sum[v].Zero();
sum[v]=Point2<ScalarType>(0,0);
}
ScalarType totProjArea=0;
for (FaceIterator f=Super::m.face.begin(); f!=Super::m.face.end(); f++) {
ScalarType area2 = ((v1-v0) ^ (v2-v0));
totProjArea+=area2;
ScalarType o[3] = { // (opposite edge)^2
( v1-v2).SquaredNorm(),
(v0 -v2).SquaredNorm(),
(v0-v1 ).SquaredNorm(),
};
ScalarType e =( data[f][0] * o[0] +
data[f][1] * o[1] +
data[f][2] * o[2] ) / (area2*area2);
for (int i=0; i<3; i++){
int j=(i+1)%3, k=(i+2)%3;
ScalarType p=(vj-vi)*(vk-vi);
ScalarType gy= e*(o[k]-p) - 2*data[f][j];
ScalarType gx= e*(o[j]-p) - 2*data[f][k];
// speed-free mode (experimental):
//sum[f->V(i)]+= ( (vj-vi) * gx + (vk-vi) * gy );// / area2;
// speed mode:
sum[f->V(i)]+= ( (vj-vi) * gx + (vk-vi) * gy ) / area2;
}
}
ScalarType max=0; // max displacement
for (VertexIterator v=Super::m.vert.begin(); v!=Super::m.vert.end(); v++)
if ( !Super::isFixed[v] )
{
// speed-free mode (experimental):
//v->T().P()-=speed * sum[v] *totProjArea/totArea;
// speed mode:
ScalarType n=sum[v].Norm(); if ( n > 1 ) { sum[v]/=n; n=1.0;}
v->T().P()-=(sum[v] ) * speed ;
if (max<n) max=n;
}
return max;
#undef v0
#undef v1
#undef v2
#undef vi
#undef vj
#undef vk
//printf("rejected %d\n",rejected);
}
void TargetCurrentGeometry(){
/* Super::isFixed.Start();
data.Start();
sum.Start();*/
totArea=0;
for (FaceIterator f=Super::m.face.begin(); f!=Super::m.face.end(); f++) {
double area2 = ((f->V(1)->P() - f->V(0)->P() )^(f->V(2)->P() - f->V(0)->P() )).Norm();
totArea+=area2;
for (int i=0; i<3; i++){
data[f][i]=(
(f->V1(i)->P() - f->V0(i)->P() )*(f->V2(i)->P() - f->V0(i)->P() )
);
// / area2;
}
}
}
};
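/* Usage sketch (illustrative only; "MyMesh" is hypothetical): MIPS does not
   need a fixed boundary, so one can simply do

     MIPSTexCoordOptimization<MyMesh> opt(mesh);
     opt.SetNothingAsFixed();
     opt.TargetCurrentGeometry();
     opt.IterateUntilConvergence();
*/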
#if 0 // Temporarily commented out. It still has to be thoroughly tested...
template<class MESH_TYPE>
class WachspressTexCoordOptimization:public TexCoordOptimization<MESH_TYPE>{
public:
/* Types */
typedef MESH_TYPE MeshType;
typedef typename MESH_TYPE::VertexIterator VertexIterator;
typedef typename MESH_TYPE::FaceIterator FaceIterator;
typedef typename MESH_TYPE::VertexType VertexType;
typedef typename MESH_TYPE::FaceType FaceType;
typedef typename MESH_TYPE::VertexType::TexCoordType::PointType PointType;
typedef typename MESH_TYPE::VertexType::TexCoordType::PointType::ScalarType ScalarType;
private:
class Factors{
public:
ScalarType data[3][2];
};
typedef TexCoordOptimization<MESH_TYPE> Super; // superclass (for convenience)
// extra data per face: factors
SimpleTempData<typename MESH_TYPE::FaceContainer, Factors > factors;
public:
// constructor and destructor
WachspressTexCoordOptimization(MeshType &_m):Super(_m),factors(_m.face){
}
~WachspressTexCoordOptimization(){
/* factors.Stop();
Super::isFixed.Stop();*/
}
void IterateBlind(){
/* todo: same as Iterate(), but without computing the return value */
Iterate();
}
ScalarType Iterate(){
ScalarType max=0; // max displacement
for (VertexIterator v=Super::m.vert.begin(); v!=Super::m.vert.end(); v++) {
v->div=0; v->sum.SetZero();
}
#define vi0 (f->V(i0)->P())
#define vi1 (f->V(i1)->P())
#define vi2 (f->V(i2)->P())
#define EPSILON 1e-4
for (FaceIterator f=Super::m.face.begin(); f!=Super::m.face.end(); f++){
ScalarType A=(((f->V(0)->P()) - (f->V(1)->P()))^((f->V(0)->P()) - (f->V(2)->P()))).Norm();
if (A<EPSILON) continue;
for (int i0=0; i0<3; i0++) {
int i1=(i0+1)%3,i2=(i0+2)%3;
ScalarType fact = (vi1-vi0)*(vi2-vi0)/A;
//fact=1;
if ( (!f->V(i1)->IsB()) /*|| f->V(o)->IsB()*/){
f->V(i1)->sum += f->V(i0)->T().P() * fact;
f->V(i1)->div += fact;
}
if ( (!f->V(i2)->IsB()) /*|| f->V(o)->IsB()*/){
f->V(i2)->sum += f->V(i0)->T().P() * fact;
f->V(i2)->div += fact;
}
}
}
for (VertexIterator v=Super::m.vert.begin(); v!=Super::m.vert.end(); v++) {
if ( !Super::isFixed[v] )
if (v->div>0.001) {
v->T().P() = v->sum/v->div;
}
}
return max;
}
void TargetCurrentGeometry(){
}
};
#endif
template<class MESH_TYPE>
class MeanValueTexCoordOptimization:public TexCoordOptimization<MESH_TYPE>{
public:
/* Types */
typedef MESH_TYPE MeshType;
typedef typename MESH_TYPE::VertexIterator VertexIterator;
typedef typename MESH_TYPE::FaceIterator FaceIterator;
typedef typename MESH_TYPE::VertexType VertexType;
typedef typename MESH_TYPE::FaceType FaceType;
typedef typename MESH_TYPE::VertexType::TexCoordType::PointType PointType;
typedef typename MESH_TYPE::VertexType::TexCoordType::PointType::ScalarType ScalarType;
private:
class Factors{
public:
ScalarType data[3][2];
};
typedef TexCoordOptimization<MESH_TYPE> Super; // superclass (for convenience)
// extra data per face: factors
SimpleTempData<typename MESH_TYPE::FaceContainer, Factors > factors;
// extra data per vertex: sums and div
SimpleTempData<typename MESH_TYPE::VertContainer, PointType > sum;
SimpleTempData<typename MESH_TYPE::VertContainer, ScalarType > div;
public:
// constructor and destructor
MeanValueTexCoordOptimization(MeshType &_m):Super(_m),factors(_m.face),sum(_m.vert),div(_m.vert){
}
~MeanValueTexCoordOptimization(){
/* factors.Stop();
sum.Stop();
div.Stop();
Super::isFixed.Stop();*/
}
void IterateBlind(){
/* todo: same as Iterate(), but without computing the return value */
Iterate();
}
ScalarType Iterate(){
ScalarType max=0; // max displacement
for (VertexIterator v=Super::m.vert.begin(); v!=Super::m.vert.end(); v++) {
sum[v]=PointType(0,0);
div[v]=0;
}
for (FaceIterator f=Super::m.face.begin(); f!=Super::m.face.end(); f++){
for (int i=0; i<3; i++)
for (int j=1; j<3; j++) {
int d=i, o=(i+3-j)%3;
sum[f->V(d)] += f->V(o)->T().P() * factors[f].data[i][j-1];
div[f->V(d)] += factors[f].data[i][j-1];
}
}
for (VertexIterator v=Super::m.vert.begin(); v!=Super::m.vert.end(); v++)
if ( !Super::isFixed[v] )
if ( div[v]>0.000001 ) {
PointType swap=v->T().P();
PointType goal=sum[v]/div[v];
v->T().P() = goal*(ScalarType)0.1+swap*(ScalarType)0.9;
//v->T().P()=v->RestUV*(1-v->Damp)+(sum[v]/div[v])*(v->Damp);
ScalarType temp=(swap-v->T().P()).SquaredNorm();
if (max<temp)
max=temp;
}
return max;
}
void TargetEquilateralGeometry(){
for (VertexIterator v=Super::m.vert.begin(); v!=Super::m.vert.end(); v++) {
div[v]=0;
}
const ScalarType fact= 1.0 / sqrt(3.0);
for (FaceIterator f=Super::m.face.begin(); f!=Super::m.face.end(); f++){
for (int i=0; i<3; i++)
for (int j=0; j<2; j++) {
factors[f].data[i][j] = fact;
div[f->V(i)] += fact ;
}
}
}
void TargetCurrentGeometry(){
/*Super::isFixed.Start();
factors.Start();
sum.Start();
div.Start();*/
for (VertexIterator v=Super::m.vert.begin(); v!=Super::m.vert.end(); v++) {
div[v]=0;
};
for (FaceIterator f=Super::m.face.begin(); f!=Super::m.face.end(); f++){
for (int i=0; i<3; i++)
for (int j=1; j<3; j++) factors[f].data[i][j-1]=0;
};
#define vs (f->V(s)->P())
#define vd (f->V(d)->P())
#define vo (f->V(o)->P())
#define EPSILON 1e-4
for (FaceIterator f=Super::m.face.begin(); f!=Super::m.face.end(); f++){
int s=0,d=1,o=2;
ScalarType A=((vs - vd)^(vs - vo)).Norm();
if (A<EPSILON) break;
for (int i=0; i<3; i++)
for (int j=1; j<3; j++) {
d=i; s=(i+j)%3; o=(i+3-j)%3;
{
ScalarType dd=((vd-vs).Norm());
if (dd<=0.0001) continue;
ScalarType fact= ( ( vd -vo ).Norm() - ((vd-vo)*(vd-vs))/dd) /A;
//if (fact<0) printf("AAAGH!");
factors[f].data[d][j-1] = fact;
//f->V(d)->sum += f->V(o)->projected * fact;
div[f->V(d)] += fact ;
} //else {
//printf(".");
//}
}
}
for (FaceIterator f=Super::m.face.begin(); f!=Super::m.face.end(); f++){
for (int i=0; i<3; i++)
for (int j=1; j<3; j++)
if (div[f->V(i)]>0.0001) {
//fact[i][j-1]/=div[f->V(i)];
}
//else f->fact[i][j-1]=0.0;
}
/*
for (f=face.begin(); f!=face.end(); f++) {
for (int i=0; i<3; i++)
for (int j=1; j<3; j++)
if (f->V(i)->div>0.01) {
f->fact[i][j-1]/=f->V(i)->div;
}
else f->fact[i][j-1]=0.0;
} */
#undef vs
#undef vd
#undef vo
}
};
/* texture coords general utility functions */
/*++++++++++++++++++++++++++++++++++++++++++*/
// returns false if any fold is present (faster than MarkFolds)
template<class MESH_TYPE>
bool IsTexCoordFoldFree(MESH_TYPE &m){
assert(HasPerVertexTexCoord(m));
typedef typename MESH_TYPE::VertexType::TexCoordType::PointType PointType;
typedef typename MESH_TYPE::VertexType::TexCoordType::PointType::ScalarType ScalarType;
ScalarType lastsign=0;
for (typename MESH_TYPE::FaceIterator f=m.face.begin(); f!=m.face.end(); f++){
ScalarType sign=((f->V(1)->T().P()-f->V(0)->T().P()) ^ (f->V(2)->T().P()-f->V(0)->T().P()));
if (sign!=0) {
if (sign*lastsign<0) return false;
lastsign=sign;
}
}
return true;
}
// detects and marks folded faces by setting their quality to 0 (and to 1 otherwise);
// returns the number of folded faces
template<class MESH_TYPE>
int MarkTexCoordFolds(MESH_TYPE &m){
assert(HasPerVertexTexCoord(m));
assert(m.HasPerFaceQuality());
typedef typename MESH_TYPE::VertexType::TexCoordType::PointType PointType;
typedef typename MESH_TYPE::VertexType::TexCoordType::PointType::ScalarType ScalarType;
SimpleTempData<typename MESH_TYPE::FaceContainer, short> sign(m.face);
//sign.Start(0);
// first pass, determine predominant sign
int npos=0, nneg=0;
for (typename MESH_TYPE::FaceIterator f=m.face.begin(); f!=m.face.end(); f++){
ScalarType fsign=((f->V(1)->T().P()-f->V(0)->T().P()) ^ (f->V(2)->T().P()-f->V(0)->T().P()));
sign[f]=0; // degenerate (zero-area) faces count as neither orientation
if (fsign<0) { sign[f]=-1; nneg++; }
if (fsign>0) { sign[f]=+1; npos++; }
}
// second pass, detect folded faces
int res=0;
short gsign= (nneg>npos)?-1:+1;
for (typename MESH_TYPE::FaceIterator f=m.face.begin(); f!=m.face.end(); f++){
if (sign[f]*gsign<0){
res++;
f->Q()=0;
} else f->Q()=1;
}
//sign.Stop();
return res;
}
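// A minimal usage sketch (illustrative): after parametrization, flag folded
// faces via per-face quality and optionally smooth them away.
//
//   int nFolded = MarkTexCoordFolds(mesh);  // folded faces get Q()==0, the others Q()==1
//   if (nFolded > 0) SmoothTexCoords(mesh); // one crude remedy; see also MIPSTexCoordFoldHealer below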
// Smooths texture coords.
// (can be useful to remove folds,
// e.g. those created when obtaining texture coordinates after projections)
template<class MESH_TYPE>
void SmoothTexCoords(MESH_TYPE &m){
assert(HasPerVertexTexCoord(m));
typedef typename MESH_TYPE::VertexType::TexCoordType::PointType PointType;
typedef typename MESH_TYPE::VertexType::TexCoordType::ScalarType ScalarType;
SimpleTempData<typename MESH_TYPE::VertContainer, int> div(m.vert);
SimpleTempData<typename MESH_TYPE::VertContainer, PointType > sum(m.vert);
/* div.Start();
sum.Start();*/
for (typename MESH_TYPE::VertexIterator v=m.vert.begin(); v!=m.vert.end(); v++) {
sum[v].SetZero();
div[v]=0;
}
for (typename MESH_TYPE::FaceIterator f=m.face.begin(); f!=m.face.end(); f++){
div[f->V(0)] +=2; sum[f->V(0)] += f->V(2)->T().P(); sum[f->V(0)] += f->V(1)->T().P();
div[f->V(1)] +=2; sum[f->V(1)] += f->V(0)->T().P(); sum[f->V(1)] += f->V(2)->T().P();
div[f->V(2)] +=2; sum[f->V(2)] += f->V(1)->T().P(); sum[f->V(2)] += f->V(0)->T().P();
}
for (typename MESH_TYPE::VertexIterator v=m.vert.begin(); v!=m.vert.end(); v++)
if (!v->IsB())
{
//if (v->div>0) {
if (div[v]>0) {
v->T().P() = sum[v]/((ScalarType)div[v]);
}
}
/*div.Stop();
sum.Stop();*/
}
// MIPSTexCoordFoldHealer
// ----------------------
// Uses MIPS optimization to attempt to remove folds.
// Acts only in the proximity of folded faces!
// Use "IterateUntilConvergence" to unfold faces (returns the number of performed iterations, as usual)
// Use "maxStarSize" (direct access) to determine size of affected patch around folded face
// AUTO_SPEED metaparameter:
#define AUTO_SPEED 1
// if set to one, speed is reduced/increased automatically to avoid oscillating behaviour
// (consumes memory and CPU, but increases robustness with the speed parameter and sometimes converges faster)
template<class MESH_TYPE>
class MIPSTexCoordFoldHealer:public MIPSTexCoordOptimization<MESH_TYPE>{
public:
int maxStarSize; // max star size that is affected around a folded face. Default: 3
typedef MESH_TYPE MeshType;
typedef typename MESH_TYPE::VertexIterator VertexIterator;
typedef typename MESH_TYPE::FaceIterator FaceIterator;
typedef typename MESH_TYPE::VertexType VertexType;
typedef typename MESH_TYPE::FaceType FaceType;
typedef typename MESH_TYPE::ScalarType ScalarType;
typedef MIPSTexCoordOptimization<MESH_TYPE> Super; // superclass (for convenience)
protected:
SimpleTempData<typename MESH_TYPE::FaceContainer, bool > foldf;
SimpleTempData<typename MESH_TYPE::VertContainer, bool > foldv;
#if AUTO_SPEED
SimpleTempData<typename MESH_TYPE::VertContainer, Point2<ScalarType> > lastDir;
SimpleTempData<typename MESH_TYPE::VertContainer, ScalarType > lastSpeed;
#endif
ScalarType sign;
int nfolds;
FaceType* aFoldedFace;
void PropagateFoldF(){
for (typename MeshType::FaceIterator f=Super::m.face.begin(); f!=Super::m.face.end(); f++){
if (foldv[ f->V(0)] || foldv[ f->V(1)] || foldv[ f->V(2) ] ) {
foldf[f] = true;
}
}
}
void PropagateFoldV(){
for (typename MeshType::FaceIterator f=Super::m.face.begin(); f!=Super::m.face.end(); f++) {
if (foldf[f] ) {
foldv[ f->V(0) ] = foldv[ f->V(1) ] = foldv[ f->V(2) ] = true;
}
}
}
bool FindFolds(){
/*ScalarType lastsign=0;*/
int npos=0, nneg=0;
for (typename MESH_TYPE::FaceIterator f=Super::m.face.begin(); f!=Super::m.face.end(); f++){
ScalarType sign=((f->V(1)->T().P()-f->V(0)->T().P()) ^ (f->V(2)->T().P()-f->V(0)->T().P()));
if (sign>0) { npos++; }
if (sign<0) { nneg++; }
}
if (npos*nneg==0) {sign=0; nfolds=0;} else
if (npos>nneg) { sign=+1; nfolds=nneg; } else
{ sign=-1; nfolds=npos; };
for (typename MeshType::FaceIterator f=Super::m.face.begin(); f!=Super::m.face.end(); f++){
ScalarType signf=((f->V(1)->T().P()-f->V(0)->T().P()) ^ (f->V(2)->T().P()-f->V(0)->T().P()));
/* if ((!Super::isFixed[f->V(0)])&&(!Super::isFixed[f->V(1)])&&(!Super::isFixed[f->V(2)]))*/
foldf[f] = (signf*sign<0);
/* else
foldf[f] = false;*/
}
return true;
}
public:
int IterateUntilConvergence(ScalarType threshold=0.0001, int maxite=50){
(void)threshold; // unused: convergence here means "no folds left", not a displacement bound
for (VertexIterator v=Super::m.vert.begin(); v!=Super::m.vert.end(); v++) foldv[v]=false;
FindFolds();
PropagateFoldV();
PropagateFoldF();
/* int i=0;*/
int nite = 0, totIte=0, pass=0;
while (Iterate()>0) {
totIte++;
nite++;
if (nite>=maxite) {
PropagateFoldV();
PropagateFoldF();
nite=0;
if (pass++>=maxStarSize) break; // number of passes
}
}
return totIte;
}
// constructor and destructor
MIPSTexCoordFoldHealer(MeshType &_m):MIPSTexCoordOptimization<MeshType>(_m),foldf(_m.face),foldv(_m.vert)
#if AUTO_SPEED
,lastDir(_m.vert),lastSpeed(_m.vert,1.0)
#endif
{sign=0; nfolds=0; maxStarSize=3; };
~MIPSTexCoordFoldHealer(){
/* data.Stop();
sum.Stop();
Super::isFixed.Stop();*/
}
ScalarType Iterate(){
#define v0 (f->V(0)->T().P())
#define v1 (f->V(1)->T().P())
#define v2 (f->V(2)->T().P())
#define vi (f->V(i)->T().P())
#define vj (f->V(j)->T().P())
#define vk (f->V(k)->T().P())
for (VertexIterator v=Super::m.vert.begin(); v!=Super::m.vert.end(); v++) {
//sum[v].Zero();
Super::sum[v]=Point2<ScalarType>(0,0);
}
ScalarType totProjArea=0;
nfolds=0;
for (FaceIterator f=Super::m.face.begin(); f!=Super::m.face.end(); f++) {
if (Super::isFixed[f->V(0)] && Super::isFixed[f->V(1)] && Super::isFixed[f->V(2)]) continue;
if (!foldf[f]) continue;
ScalarType area2 = ((v1-v0) ^ (v2-v0));
if (area2*sign<0) nfolds++;
totProjArea+=area2;
ScalarType o[3] = { // (opposite edge)^2
( v1-v2).SquaredNorm(),
(v0 -v2).SquaredNorm(),
(v0-v1 ).SquaredNorm(),
};
ScalarType e =( Super::data[f][0] * o[0] +
Super::data[f][1] * o[1] +
Super::data[f][2] * o[2] ) / (area2*area2);
for (int i=0; i<3; i++){
int j=(i+1)%3, k=(i+2)%3;
ScalarType p=(vj-vi)*(vk-vi);
ScalarType gy= e*(o[k]-p) - 2*Super::data[f][j];
ScalarType gx= e*(o[j]-p) - 2*Super::data[f][k];
// speed-free mode (experimental):
//sum[f->V(i)]+= ( (vj-vi) * gx + (vk-vi) * gy );// / area2;
// speed mode:
Super::sum[f->V(i)]+= ( (vj-vi) * gx + (vk-vi) * gy ) / area2;
}
}
if (nfolds==0) return 0;
for (VertexIterator v=Super::m.vert.begin(); v!=Super::m.vert.end(); v++)
if ( !Super::isFixed[v] && foldv[v] )
{
ScalarType n=Super::sum[v].Norm(); if ( n > 1 ) { Super::sum[v]/=n; n=1.0;}
#if AUTO_SPEED
if (Super::sum[v]*lastDir[v] < 0.0) lastSpeed[v]*=(ScalarType)0.8; else lastSpeed[v]*=(ScalarType)1.1;
lastDir[v]=Super::sum[v];
v->T().P()-=(Super::sum[v] ) * (Super::speed * lastSpeed[v] );
#else
v->T().P()-=(Super::sum[v] ) * Super::speed;
#endif
}
return (ScalarType)nfolds;
#undef v0
#undef v1
#undef v2
#undef vi
#undef vj
#undef vk
//printf("rejected %d\n",rejected);
}
};
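// A minimal usage sketch (illustrative; assumes the MIPS base class provides
// TargetCurrentGeometry(), as the other optimizers in this header do):
//
//   MIPSTexCoordFoldHealer<MyMesh> healer(mesh);
//   healer.TargetCurrentGeometry();
//   healer.maxStarSize = 3;                       // how far the fix may spread around a fold
//   int iters = healer.IterateUntilConvergence(); // stops at 0 folds or when the pass budget is exhausted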
} } // End namespace vcg::tri
#endif // __VCGLIB__TEXTCOOORD_OPTIMIZATION
|
HSetMaintainer.h | #ifndef HSET_MAINTAINER_H
#define HSET_MAINTAINER_H
/*************************************************************
* Copyright: (C) 2012 by Markus Schordan *
* Author : Markus Schordan *
* License : see file LICENSE in the CodeThorn distribution *
*************************************************************/
#include <boost/unordered_set.hpp>
//#define HSET_MAINTAINER_DEBUG_MODE
/*!
* \author Markus Schordan
* \date 2012.
*/
template<typename KeyType,typename HashFun, typename EqualToPred>
class HSetMaintainer
: public boost::unordered_set<KeyType*,HashFun,EqualToPred>
{
public:
typedef std::pair<bool,const KeyType*> ProcessingResult;
/*!
* \author Marc Jasper
* \date 2016.
*/
HSetMaintainer() { _keepStatesDuringDeconstruction = false; }
/*!
* \author Marc Jasper
* \date 2016.
*/
HSetMaintainer(bool keepStates) { _keepStatesDuringDeconstruction = keepStates; }
/*!
* \author Marc Jasper
* \date 2016.
*/
virtual ~HSetMaintainer() {
if (!_keepStatesDuringDeconstruction){
typename HSetMaintainer::iterator i;
for (i=this->begin(); i!=this->end(); ++i) {
delete (*i);
}
}
}
bool exists(KeyType& s) {
return determine(s)!=0;
}
size_t id(const KeyType& s) {
typename boost::unordered_set<KeyType*,HashFun,EqualToPred>::const_iterator i;
i=HSetMaintainer<KeyType,HashFun,EqualToPred>::find(s);
if(i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end()) {
// the iterator lacks operator '-', so we compute the distance by walking (O(n))
size_t pos=0;
typename boost::unordered_set<KeyType*,HashFun,EqualToPred>::const_iterator b;
b=HSetMaintainer<KeyType,HashFun,EqualToPred>::begin();
while(b!=i) {
pos++;
++b;
}
return pos;
}
else
throw "Error: unknown value. Maintainer cannot determine an id.";
}
//typename HSetMaintainer<KeyType,HashFun,EqualToPred>::iterator i; // unused member; each method declares its own local iterator
KeyType* determine(KeyType& s) {
KeyType* ret=0;
typename HSetMaintainer<KeyType,HashFun,EqualToPred>::iterator i;
#pragma omp critical(HASHSET)
{
i=HSetMaintainer<KeyType,HashFun,EqualToPred>::find(&s);
if(i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end()) {
ret=const_cast<KeyType*>(*i);
} else {
ret=0;
}
}
return ret;
}
const KeyType* determine(const KeyType& s) {
const KeyType* ret=0;
typename HSetMaintainer<KeyType,HashFun,EqualToPred>::iterator i;
#pragma omp critical(HASHSET)
{
i=HSetMaintainer<KeyType,HashFun,EqualToPred>::find(const_cast<KeyType*>(&s));
if(i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end()) {
ret=const_cast<KeyType*>(*i);
} else {
ret=0;
}
}
return ret;
}
ProcessingResult process(const KeyType* key) {
ProcessingResult res2;
#pragma omp critical(HASHSET)
{
std::pair<typename HSetMaintainer::iterator, bool> res;
typename HSetMaintainer::iterator iter=this->find(const_cast<KeyType*>(key)); // TODO: eliminate const_cast
if(iter!=this->end()) {
// found it!
res=std::make_pair(iter,false);
} else {
res=this->insert(const_cast<KeyType*>(key)); // TODO: eliminate const_cast
}
res2=std::make_pair(res.second,*res.first);
}
return res2;
}
const KeyType* processNewOrExisting(const KeyType* s) {
ProcessingResult res=process(s);
return res.second;
}
//! <true,const KeyType> if new element was inserted
//! <false,const KeyType> if element already existed
ProcessingResult process(KeyType key) {
ProcessingResult res2;
#pragma omp critical(HASHSET)
{
std::pair<typename HSetMaintainer::iterator, bool> res;
typename HSetMaintainer::iterator iter=this->find(&key);
if(iter!=this->end()) {
// found it!
res=std::make_pair(iter,false);
} else {
// convert the stack-allocated object to a heap-allocated one;
// this copies the entire object
// TODO: the copy can be avoided by providing a process function with a pointer arg;
// this requires a more detailed result: pointer exists / alternate pointer with equal object exists / does not exist
KeyType* keyPtr=new KeyType();
*keyPtr=key;
res=this->insert(keyPtr);
if (!res.second) {
// this case should never occur: if the element already existed, the
// "iter!=this->end()" branch above would have been taken instead
std::cerr << "ERROR: HSetMaintainer: Element was not inserted even though it could not be found in the set." << std::endl;
ROSE_ASSERT(0);
delete keyPtr;
keyPtr = NULL;
}
}
#ifdef HSET_MAINTAINER_DEBUG_MODE
// debug check: inserting the same pointer twice must yield the same result
std::pair<typename HSetMaintainer::iterator, bool> dbg1;
dbg1=this->insert(&key); // the set stores pointers, so a pointer must be inserted
std::pair<typename HSetMaintainer::iterator, bool> dbg2;
dbg2=this->insert(&key);
if(!(dbg1==dbg2)) {
std::cerr<< "Error: HSetMaintainer failed:"<<std::endl;
std::cerr<< "dbg1:"<<(*dbg1.first)->toString()<<":"<<dbg1.second<<std::endl;
std::cerr<< "dbg2:"<<(*dbg2.first)->toString()<<":"<<dbg2.second<<std::endl;
exit(1);
}
std::cerr << "HSET insert OK"<<std::endl;
#endif
res2=std::make_pair(res.second,*res.first);
}
return res2;
}
const KeyType* processNew(KeyType& s) {
//std::pair<typename HSetMaintainer::iterator, bool> res=process(s);
ProcessingResult res=process(s);
if(!res.first) {
std::cerr<< "Error: HSetMaintainer::processNew failed:"<<std::endl;
std::cerr<< "res:"<<res.first<<std::endl;
std::cerr<< res.second->toString()<<std::endl;
exit(1);
}
return res.second;
}
const KeyType* processNewOrExisting(KeyType& s) {
ProcessingResult res=process(s);
return res.second;
}
long numberOf() { return HSetMaintainer<KeyType,HashFun,EqualToPred>::size(); }
long maxCollisions() {
size_t max=0;
for(size_t i=0; i<HSetMaintainer<KeyType,HashFun,EqualToPred>::bucket_count();++i) {
if(HSetMaintainer<KeyType,HashFun,EqualToPred>::bucket_size(i)>max) {
max=HSetMaintainer<KeyType,HashFun,EqualToPred>::bucket_size(i);
}
}
return max;
}
double loadFactor() {
return HSetMaintainer<KeyType,HashFun,EqualToPred>::load_factor();
}
long memorySize() const {
long mem=0;
for(typename HSetMaintainer<KeyType,HashFun,EqualToPred>::const_iterator i
=HSetMaintainer<KeyType,HashFun,EqualToPred>::begin();
i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end();
++i) {
mem+=(*i)->memorySize();
mem+=sizeof(*i);
}
return mem+sizeof(*this);
}
private:
//const KeyType* ptr(KeyType& s) {}
bool _keepStatesDuringDeconstruction;
};
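// A minimal usage sketch (illustrative; State, StateHashFun and StateEqualToPred
// are hypothetical user-supplied types whose functors hash/compare through the
// stored pointers):
//
//   HSetMaintainer<State,StateHashFun,StateEqualToPred> states;
//   State s;                               // stack-allocated candidate
//   HSetMaintainer<State,StateHashFun,StateEqualToPred>::ProcessingResult r
//     = states.process(s);                 // copies s to the heap only if it is new
//   // r.first==true : s was new, a heap copy was inserted; r.second points to it
//   // r.first==false: an equal state already existed;      r.second points to it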
#endif
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define PrimitiveExtentPad 2048
#define MaxBezierCoordinates 67108864
#define ThrowPointExpectedException(token,exception) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
typedef struct _EdgeInfo
{
SegmentInfo
bounds;
double
scanline;
PointInfo
*points;
size_t
number_points;
ssize_t
direction;
MagickBooleanType
ghostline;
size_t
highwater;
} EdgeInfo;
typedef struct _ElementInfo
{
double
cx,
cy,
major,
minor,
angle;
} ElementInfo;
typedef struct _MVGInfo
{
PrimitiveInfo
**primitive_info;
size_t
*extent;
ssize_t
offset;
PointInfo
point;
ExceptionInfo
*exception;
} MVGInfo;
typedef struct _PolygonInfo
{
EdgeInfo
*edges;
size_t
number_edges;
} PolygonInfo;
typedef enum
{
MoveToCode,
OpenCode,
GhostlineCode,
LineToCode,
EndCode
} PathInfoCode;
typedef struct _PathInfo
{
PointInfo
point;
PathInfoCode
code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
DrawInfo
*draw_info;
draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
GetDrawInfo((ImageInfo *) NULL,draw_info);
return(draw_info);
}
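/*
  A minimal usage sketch (illustrative): AcquireDrawInfo() pairs with
  DestroyDrawInfo(), and the primitive string is rendered with DrawImage():

    DrawInfo *draw_info = AcquireDrawInfo();
    (void) CloneString(&draw_info->primitive,"circle 50,50 50,80");
    (void) DrawImage(image,draw_info,exception);
    draw_info = DestroyDrawInfo(draw_info);
*/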
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created and initialized to
% default values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
const DrawInfo *draw_info)
{
DrawInfo
*clone_info;
ExceptionInfo
*exception;
clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
GetDrawInfo(image_info,clone_info);
if (draw_info == (DrawInfo *) NULL)
return(clone_info);
exception=AcquireExceptionInfo();
if (draw_info->id != (char *) NULL)
(void) CloneString(&clone_info->id,draw_info->id);
if (draw_info->primitive != (char *) NULL)
(void) CloneString(&clone_info->primitive,draw_info->primitive);
if (draw_info->geometry != (char *) NULL)
(void) CloneString(&clone_info->geometry,draw_info->geometry);
clone_info->compliance=draw_info->compliance;
clone_info->viewbox=draw_info->viewbox;
clone_info->affine=draw_info->affine;
clone_info->gravity=draw_info->gravity;
clone_info->fill=draw_info->fill;
clone_info->stroke=draw_info->stroke;
clone_info->stroke_width=draw_info->stroke_width;
if (draw_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
exception);
if (draw_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke_antialias=draw_info->stroke_antialias;
clone_info->text_antialias=draw_info->text_antialias;
clone_info->fill_rule=draw_info->fill_rule;
clone_info->linecap=draw_info->linecap;
clone_info->linejoin=draw_info->linejoin;
clone_info->miterlimit=draw_info->miterlimit;
clone_info->dash_offset=draw_info->dash_offset;
clone_info->decorate=draw_info->decorate;
clone_info->compose=draw_info->compose;
if (draw_info->text != (char *) NULL)
(void) CloneString(&clone_info->text,draw_info->text);
if (draw_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,draw_info->font);
if (draw_info->metrics != (char *) NULL)
(void) CloneString(&clone_info->metrics,draw_info->metrics);
if (draw_info->family != (char *) NULL)
(void) CloneString(&clone_info->family,draw_info->family);
clone_info->style=draw_info->style;
clone_info->stretch=draw_info->stretch;
clone_info->weight=draw_info->weight;
if (draw_info->encoding != (char *) NULL)
(void) CloneString(&clone_info->encoding,draw_info->encoding);
clone_info->pointsize=draw_info->pointsize;
clone_info->kerning=draw_info->kerning;
clone_info->interline_spacing=draw_info->interline_spacing;
clone_info->interword_spacing=draw_info->interword_spacing;
clone_info->direction=draw_info->direction;
if (draw_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,draw_info->density);
clone_info->align=draw_info->align;
clone_info->undercolor=draw_info->undercolor;
clone_info->border_color=draw_info->border_color;
if (draw_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
{
register ssize_t
x;
for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*clone_info->dash_pattern));
if (clone_info->dash_pattern == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
sizeof(*clone_info->dash_pattern));
(void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
(x+1)*sizeof(*clone_info->dash_pattern));
}
clone_info->gradient=draw_info->gradient;
if (draw_info->gradient.stops != (StopInfo *) NULL)
{
size_t
number_stops;
number_stops=clone_info->gradient.number_stops;
clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
number_stops,sizeof(*clone_info->gradient.stops));
if (clone_info->gradient.stops == (StopInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
(size_t) number_stops*sizeof(*clone_info->gradient.stops));
}
clone_info->bounds=draw_info->bounds;
clone_info->fill_alpha=draw_info->fill_alpha;
clone_info->stroke_alpha=draw_info->stroke_alpha;
clone_info->element_reference=draw_info->element_reference;
clone_info->clip_path=draw_info->clip_path;
clone_info->clip_units=draw_info->clip_units;
if (draw_info->clip_mask != (char *) NULL)
(void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
MagickTrue,exception);
if (draw_info->composite_mask != (Image *) NULL)
clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
MagickTrue,exception);
clone_info->render=draw_info->render;
clone_info->debug=IsEventLogging();
exception=DestroyExceptionInfo(exception);
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o path_info: Specifies a pointer to a PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
if (((p)-(q)) < 0.0) \
return(-1); \
if (((p)-(q)) > 0.0) \
return(1); \
}
register const PointInfo
*p,
*q;
/*
Edge sorting for right-handed coordinate system.
*/
p=((const EdgeInfo *) p_edge)->points;
q=((const EdgeInfo *) q_edge)->points;
DrawCompareEdge(p[0].y,q[0].y);
DrawCompareEdge(p[0].x,q[0].x);
DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
(q[1].x-q[0].x));
DrawCompareEdge(p[1].y,q[1].y);
DrawCompareEdge(p[1].x,q[1].x);
return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
PointInfo
point;
register ssize_t
i;
for (i=0; i < (ssize_t) (number_points >> 1); i++)
{
point=points[i];
points[i]=points[number_points-(i+1)];
points[number_points-(i+1)]=point;
}
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
long
direction,
next_direction;
PointInfo
point,
*points;
PolygonInfo
*polygon_info;
SegmentInfo
bounds;
register ssize_t
i,
n;
MagickBooleanType
ghostline;
size_t
edge,
number_edges,
number_points;
/*
Convert a path to the more efficient sorted rendering form.
*/
polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
if (polygon_info == (PolygonInfo *) NULL)
return((PolygonInfo *) NULL);
number_edges=16;
polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
(void) memset(polygon_info->edges,0,number_edges*
sizeof(*polygon_info->edges));
direction=0;
edge=0;
ghostline=MagickFalse;
n=0;
number_points=0;
points=(PointInfo *) NULL;
(void) memset(&point,0,sizeof(point));
(void) memset(&bounds,0,sizeof(bounds));
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=0.0;
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) direction;
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->number_edges=0;
for (i=0; path_info[i].code != EndCode; i++)
{
if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
(path_info[i].code == GhostlineCode))
{
/*
Move to.
*/
if ((points != (PointInfo *) NULL) && (n >= 2))
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
}
if (points == (PointInfo *) NULL)
{
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
point=path_info[i].point;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
direction=0;
n=1;
continue;
}
/*
Line to.
*/
next_direction=((path_info[i].point.y > point.y) ||
((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
(path_info[i].point.x > point.x))) ? 1 : -1;
if ((points != (PointInfo *) NULL) && (direction != 0) &&
(direction != next_direction))
{
/*
New edge.
*/
point=points[n-1];
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
n=1;
ghostline=MagickFalse;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
edge++;
}
direction=next_direction;
if (points == (PointInfo *) NULL)
continue;
if (n == (ssize_t) number_points)
{
number_points<<=1;
points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
point=path_info[i].point;
points[n]=point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.x > bounds.x2)
bounds.x2=point.x;
n++;
}
if (points != (PointInfo *) NULL)
{
if (n < 2)
points=(PointInfo *) RelinquishMagickMemory(points);
else
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
ghostline=MagickFalse;
edge++;
}
}
polygon_info->number_edges=edge;
qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
sizeof(*polygon_info->edges),DrawCompareEdges);
if (IsEventLogging() != MagickFalse)
LogPolygonInfo(polygon_info);
return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
register const PathInfo
*p;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
for (p=path_info; p->code != EndCode; p++)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
"moveto ghostline" : p->code == OpenCode ? "moveto open" :
p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
"?");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
MagickBooleanType
closed_subpath;
PathInfo
*path_info;
PathInfoCode
code;
PointInfo
p,
q;
register ssize_t
i,
n;
ssize_t
coordinates,
start;
/*
Converts a PrimitiveInfo structure into a vector path structure.
*/
switch (primitive_info->primitive)
{
case AlphaPrimitive:
case ColorPrimitive:
case ImagePrimitive:
case PointPrimitive:
case TextPrimitive:
return((PathInfo *) NULL);
default:
break;
}
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
sizeof(*path_info));
if (path_info == (PathInfo *) NULL)
return((PathInfo *) NULL);
coordinates=0;
closed_subpath=MagickFalse;
n=0;
p.x=(-1.0);
p.y=(-1.0);
q.x=(-1.0);
q.y=(-1.0);
start=0;
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
code=LineToCode;
if (coordinates <= 0)
{
/*
New subpath.
*/
coordinates=(ssize_t) primitive_info[i].coordinates;
p=primitive_info[i].point;
start=n;
code=MoveToCode;
closed_subpath=primitive_info[i].closed_subpath;
}
coordinates--;
if ((code == MoveToCode) || (coordinates <= 0) ||
(fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
(fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
{
/*
Eliminate duplicate points.
*/
path_info[n].code=code;
path_info[n].point=primitive_info[i].point;
q=primitive_info[i].point;
n++;
}
if (coordinates > 0)
continue; /* next point in current subpath */
if (closed_subpath != MagickFalse)
{
closed_subpath=MagickFalse;
continue;
}
/*
Mark the subpath as open and close it with a ghostline back to its start point p.
*/
path_info[start].code=OpenCode;
path_info[n].code=GhostlineCode;
path_info[n].point=primitive_info[i].point;
n++;
path_info[n].code=LineToCode;
path_info[n].point=p;
n++;
}
path_info[n].code=EndCode;
path_info[n].point.x=0.0;
path_info[n].point.y=0.0;
if (IsEventLogging() != MagickFalse)
LogPathInfo(path_info);
path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
sizeof(*path_info));
return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with a DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
assert(draw_info != (DrawInfo *) NULL);
if (draw_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info->signature == MagickCoreSignature);
if (draw_info->id != (char *) NULL)
draw_info->id=DestroyString(draw_info->id);
if (draw_info->primitive != (char *) NULL)
draw_info->primitive=DestroyString(draw_info->primitive);
if (draw_info->text != (char *) NULL)
draw_info->text=DestroyString(draw_info->text);
if (draw_info->geometry != (char *) NULL)
draw_info->geometry=DestroyString(draw_info->geometry);
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
if (draw_info->stroke_pattern != (Image *) NULL)
draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
if (draw_info->font != (char *) NULL)
draw_info->font=DestroyString(draw_info->font);
if (draw_info->metrics != (char *) NULL)
draw_info->metrics=DestroyString(draw_info->metrics);
if (draw_info->family != (char *) NULL)
draw_info->family=DestroyString(draw_info->family);
if (draw_info->encoding != (char *) NULL)
draw_info->encoding=DestroyString(draw_info->encoding);
if (draw_info->density != (char *) NULL)
draw_info->density=DestroyString(draw_info->density);
if (draw_info->server_name != (char *) NULL)
draw_info->server_name=(char *)
RelinquishMagickMemory(draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
draw_info->dash_pattern=(double *) RelinquishMagickMemory(
draw_info->dash_pattern);
if (draw_info->gradient.stops != (StopInfo *) NULL)
draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
draw_info->gradient.stops);
if (draw_info->clip_mask != (char *) NULL)
draw_info->clip_mask=DestroyString(draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
if (draw_info->composite_mask != (Image *) NULL)
draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
draw_info->signature=(~MagickCoreSignature);
draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
const size_t edge)
{
assert(edge < polygon_info->number_edges);
polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
polygon_info->edges[edge].points);
polygon_info->number_edges--;
if (edge < polygon_info->number_edges)
(void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
(size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
register ssize_t
i;
if (polygon_info->edges != (EdgeInfo *) NULL)
{
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
polygon_info->edges[i].points=(PointInfo *)
RelinquishMagickMemory(polygon_info->edges[i].points);
polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
polygon_info->edges);
}
return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
const double y,const SegmentInfo *edge)
{
double
intercept,
z;
register double
x;
SegmentInfo
inverse_edge;
/*
Determine left and right edges.
*/
inverse_edge.x1=edge->x1;
inverse_edge.y1=edge->y1;
inverse_edge.x2=edge->x2;
inverse_edge.y2=edge->y2;
z=affine->ry*y+affine->tx;
if (affine->sx >= MagickEpsilon)
{
intercept=(-z/affine->sx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->sx < -MagickEpsilon)
{
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->sx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
{
inverse_edge.x2=edge->x1;
return(inverse_edge);
}
/*
Determine top and bottom edges.
*/
z=affine->sy*y+affine->ty;
if (affine->rx >= MagickEpsilon)
{
intercept=(-z/affine->rx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->rx < -MagickEpsilon)
{
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->rx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
{
inverse_edge.x2=edge->x2;
return(inverse_edge);
}
return(inverse_edge);
}
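/*
  InverseAffineMatrix() inverts the affine transform

      x' = sx*x + ry*y + tx
      y' = rx*x + sy*y + ty

  using the 2x2 determinant det = sx*sy - rx*ry; PerceptibleReciprocal()
  guards against division by a vanishing determinant (a degenerate,
  non-invertible transform).
*/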
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
AffineMatrix
inverse_affine;
double
determinant;
determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
affine->ry);
inverse_affine.sx=determinant*affine->sy;
inverse_affine.rx=determinant*(-affine->rx);
inverse_affine.ry=determinant*(-affine->ry);
inverse_affine.sy=determinant*affine->sx;
inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
inverse_affine.ry;
inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
inverse_affine.sy;
return(inverse_affine);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
AffineMatrix
inverse_affine;
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
PixelInfo
zero;
PointInfo
extent[4],
min,
max;
register ssize_t
i;
SegmentInfo
edge;
ssize_t
start,
stop,
y;
/*
Determine bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(source != (const Image *) NULL);
assert(source->signature == MagickCoreSignature);
assert(affine != (AffineMatrix *) NULL);
extent[0].x=0.0;
extent[0].y=0.0;
extent[1].x=(double) source->columns-1.0;
extent[1].y=0.0;
extent[2].x=(double) source->columns-1.0;
extent[2].y=(double) source->rows-1.0;
extent[3].x=0.0;
extent[3].y=(double) source->rows-1.0;
for (i=0; i < 4; i++)
{
PointInfo
point;
point=extent[i];
extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
/*
Affine transform image.
*/
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
edge.x1=MagickMax(min.x,0.0);
edge.y1=MagickMax(min.y,0.0);
edge.x2=MagickMin(max.x,(double) image->columns-1.0);
edge.y2=MagickMin(max.y,(double) image->rows-1.0);
inverse_affine=InverseAffineMatrix(affine);
GetPixelInfo(image,&zero);
start=(ssize_t) ceil(edge.y1-0.5);
stop=(ssize_t) floor(edge.y2+0.5);
source_view=AcquireVirtualCacheView(source,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source,image,stop-start,1)
#endif
for (y=start; y <= stop; y++)
{
PixelInfo
composite,
pixel;
PointInfo
point;
register ssize_t
x;
register Quantum
*magick_restrict q;
SegmentInfo
inverse_edge;
inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
if (inverse_edge.x2 < inverse_edge.x1)
continue;
q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
1,exception);
if (q == (Quantum *) NULL)
continue;
pixel=zero;
composite=zero;
for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
{
point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
inverse_affine.tx;
point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
inverse_affine.ty;
status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
point.x,point.y,&pixel,exception);
if (status == MagickFalse)
break;
GetPixelInfoPixel(image,q,&composite);
CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
&composite);
SetPixelViaPixelInfo(image,&composite,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
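/*
  A minimal usage sketch (illustrative): composite "source" onto "image",
  translated by (20,30):

    AffineMatrix affine;
    GetAffineMatrix(&affine);         // start from the identity transform
    affine.tx=20.0;
    affine.ty=30.0;
    (void) DrawAffineImage(image,source,&affine,exception);
*/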
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double SaneStrokeWidth(const Image *image,
const DrawInfo *draw_info)
{
return(MagickMin((double) draw_info->stroke_width,
(2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,image->rows)));
}
static MagickBooleanType DrawBoundingRectangles(Image *image,
const DrawInfo *draw_info,const PolygonInfo *polygon_info,
ExceptionInfo *exception)
{
double
mid;
DrawInfo
*clone_info;
MagickStatusType
status;
PointInfo
end,
resolution,
start;
PrimitiveInfo
primitive_info[6];
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
coordinates;
(void) memset(primitive_info,0,sizeof(primitive_info));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
resolution.x=96.0;
resolution.y=96.0;
if (clone_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(clone_info->density,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == MagickFalse)
resolution.y=resolution.x;
}
mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
SaneStrokeWidth(image,clone_info)/2.0;
bounds.x1=0.0;
bounds.y1=0.0;
bounds.x2=0.0;
bounds.y2=0.0;
if (polygon_info != (PolygonInfo *) NULL)
{
bounds=polygon_info->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
bounds.x1=polygon_info->edges[i].bounds.x1;
if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
bounds.y1=polygon_info->edges[i].bounds.y1;
if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
bounds.x2=polygon_info->edges[i].bounds.x2;
if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
bounds.y2=polygon_info->edges[i].bounds.y2;
}
bounds.x1-=mid;
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
image->columns ? (double) image->columns-1 : bounds.x1;
bounds.y1-=mid;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
image->rows ? (double) image->rows-1 : bounds.y1;
bounds.x2+=mid;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
image->columns ? (double) image->columns-1 : bounds.x2;
bounds.y2+=mid;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
image->rows ? (double) image->rows-1 : bounds.y2;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].direction != 0)
status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
exception);
else
status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
exception);
if (status == MagickFalse)
break;
start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info,exception);
if (status == MagickFalse)
break;
}
if (i < (ssize_t) polygon_info->number_edges)
{
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
}
status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
start.x=(double) (bounds.x1-mid);
start.y=(double) (bounds.y1-mid);
end.x=(double) (bounds.x2+mid);
end.y=(double) (bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info,exception);
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
const char
*clip_path;
Image
*clipping_mask;
MagickBooleanType
status;
clip_path=GetImageArtifact(image,id);
if (clip_path == (const char *) NULL)
return(MagickFalse);
clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path,
exception);
if (clipping_mask == (Image *) NULL)
return(MagickFalse);
status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
clipping_mask=DestroyImage(clipping_mask);
return(status);
}
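/*
  A minimal usage sketch (illustrative): DrawClipPath() looks the clip path up
  as an image artifact keyed by "id", so the MVG content must be registered
  first:

    (void) SetImageArtifact(image,"my-clip-path","rectangle 10,10 90,90");
    (void) DrawClipPath(image,draw_info,"my-clip-path",exception);
*/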
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *clip_path,ExceptionInfo *exception)
{
DrawInfo
*clone_info;
Image
*clip_mask,
*separate_mask;
MagickStatusType
status;
/*
Draw a clip path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImage(clip_mask));
status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
status=QueryColorCompliance("#0000",AllCompliance,
&clip_mask->background_color,exception);
clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
clip_mask->background_color.alpha_trait=BlendPixelTrait;
status=SetImageBackgroundColor(clip_mask,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,clip_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
if (clone_info->clip_mask != (char *) NULL)
clone_info->clip_mask=DestroyString(clone_info->clip_mask);
status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->alpha=OpaqueAlpha;
clone_info->clip_path=MagickTrue;
status=RenderMVGContent(clip_mask,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
if (separate_mask != (Image *) NULL)
{
clip_mask=DestroyImage(clip_mask);
clip_mask=separate_mask;
status=NegateImage(clip_mask,MagickFalse,exception);
if (status == MagickFalse)
clip_mask=DestroyImage(clip_mask);
}
if (status == MagickFalse)
clip_mask=DestroyImage(clip_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *mask_path,ExceptionInfo *exception)
{
Image
*composite_mask,
*separate_mask;
DrawInfo
*clone_info;
MagickStatusType
status;
/*
Draw a mask path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImage(composite_mask));
status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
exception);
status=QueryColorCompliance("#0000",AllCompliance,
&composite_mask->background_color,exception);
composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
composite_mask->background_color.alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(composite_mask,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,mask_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->alpha=OpaqueAlpha;
status=RenderMVGContent(composite_mask,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
if (separate_mask != (Image *) NULL)
{
composite_mask=DestroyImage(composite_mask);
composite_mask=separate_mask;
status=NegateImage(composite_mask,MagickFalse,exception);
if (status == MagickFalse)
composite_mask=DestroyImage(composite_mask);
}
  if ((status == MagickFalse) && (composite_mask != (Image *) NULL))
    composite_mask=DestroyImage(composite_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
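%  For example, a caller prepares a zero-terminated dash pattern on the
%  draw info before invoking this method (a hedged sketch; the 10-on/5-off
%  pattern is illustrative):
%
%    draw_info->dash_pattern=(double *) AcquireQuantumMemory(3,
%      sizeof(*draw_info->dash_pattern));
%    draw_info->dash_pattern[0]=10.0;
%    draw_info->dash_pattern[1]=5.0;
%    draw_info->dash_pattern[2]=0.0;
%    status=DrawDashPolygon(draw_info,primitive_info,image,exception);
%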
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
double
length,
maximum_length,
offset,
scale,
total_length;
DrawInfo
*clone_info;
MagickStatusType
status;
PrimitiveInfo
*dash_polygon;
register double
dx,
dy;
register ssize_t
i;
size_t
number_vertices;
ssize_t
j,
n;
assert(draw_info != (const DrawInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
number_vertices=(size_t) i;
dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(2UL*number_vertices+32UL),sizeof(*dash_polygon));
if (dash_polygon == (PrimitiveInfo *) NULL)
return(MagickFalse);
(void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
sizeof(*dash_polygon));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->miterlimit=0;
dash_polygon[0]=primitive_info[0];
scale=ExpandAffine(&draw_info->affine);
length=scale*draw_info->dash_pattern[0];
offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
scale*draw_info->dash_offset : 0.0;
j=1;
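  /*
    Consume the dash offset: walk the dash pattern until the offset is
    exhausted so that drawing starts mid-pattern.
  */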
for (n=0; offset > 0.0; j=0)
{
if (draw_info->dash_pattern[n] <= 0.0)
break;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
if (offset > length)
{
offset-=length;
n++;
length=scale*draw_info->dash_pattern[n];
continue;
}
if (offset < length)
{
length-=offset;
offset=0.0;
break;
}
offset=0.0;
n++;
}
status=MagickTrue;
maximum_length=0.0;
total_length=0.0;
for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
{
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > MaxBezierCoordinates)
break;
if (fabs(length) < MagickEpsilon)
{
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
    for (total_length=0.0; (length >= 0.0) &&
      (maximum_length >= (total_length+length)); )
{
total_length+=length;
if ((n & 0x01) != 0)
{
dash_polygon[0]=primitive_info[0];
dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
j=1;
}
else
{
if ((j+1) > (ssize_t) number_vertices)
break;
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
if (status == MagickFalse)
break;
}
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
length-=(maximum_length-total_length);
if ((n & 0x01) != 0)
continue;
dash_polygon[j]=primitive_info[i];
dash_polygon[j].coordinates=1;
j++;
}
if ((status != MagickFalse) && (total_length < maximum_length) &&
((n & 0x01) == 0) && (j > 1))
{
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x+=MagickEpsilon;
dash_polygon[j].point.y+=MagickEpsilon;
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
}
dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
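%  The gradient itself is taken from the draw info, so a caller populates
%  its stops and geometry first (a hedged sketch; in practice the MVG
%  parser fills these fields when it encounters "push gradient", and the
%  stops array and number_stops must be set as well):
%
%    draw_info->gradient.type=LinearGradient;
%    draw_info->gradient.bounding_box.width=image->columns;
%    draw_info->gradient.bounding_box.height=image->rows;
%    draw_info->gradient.gradient_vector.x2=(double) image->columns-1.0;
%    draw_info->gradient.gradient_vector.y2=(double) image->rows-1.0;
%    status=DrawGradientImage(image,draw_info,exception);
%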
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
const ssize_t x,const ssize_t y)
{
switch (gradient->type)
{
case UndefinedGradient:
case LinearGradient:
{
double
gamma,
length,
offset,
scale;
PointInfo
p,
q;
const SegmentInfo
*gradient_vector;
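      /*
        The offset is the scalar projection of the pixel vector q onto the
        gradient vector p, i.e. (p.q)/|p|: gamma=1/(|p|*|q|), scale=p.q,
        and offset=gamma*scale*|q|.
      */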
gradient_vector=(&gradient->gradient_vector);
p.x=gradient_vector->x2-gradient_vector->x1;
p.y=gradient_vector->y2-gradient_vector->y1;
q.x=(double) x-gradient_vector->x1;
q.y=(double) y-gradient_vector->y1;
length=sqrt(q.x*q.x+q.y*q.y);
gamma=sqrt(p.x*p.x+p.y*p.y)*length;
gamma=PerceptibleReciprocal(gamma);
scale=p.x*q.x+p.y*q.y;
offset=gamma*scale*length;
return(offset);
}
case RadialGradient:
{
PointInfo
v;
if (gradient->spread == RepeatSpread)
{
v.x=(double) x-gradient->center.x;
v.y=(double) y-gradient->center.y;
return(sqrt(v.x*v.x+v.y*v.y));
}
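      /*
        Otherwise rotate the pixel vector into the gradient's frame and
        scale by the ellipse radii; the offset is the normalized radial
        distance.
      */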
v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
return(sqrt(v.x*v.x+v.y*v.y));
}
}
return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
StopInfo
*stop_1,
*stop_2;
stop_1=(StopInfo *) x;
stop_2=(StopInfo *) y;
if (stop_1->offset > stop_2->offset)
return(1);
if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
return(0);
return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
const DrawInfo *draw_info,ExceptionInfo *exception)
{
CacheView
*image_view;
const GradientInfo
*gradient;
const SegmentInfo
*gradient_vector;
double
length;
MagickBooleanType
status;
PixelInfo
zero;
PointInfo
point;
RectangleInfo
bounding_box;
ssize_t
y;
/*
Draw linear or radial gradient on image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
gradient=(&draw_info->gradient);
qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
StopInfoCompare);
gradient_vector=(&gradient->gradient_vector);
point.x=gradient_vector->x2-gradient_vector->x1;
point.y=gradient_vector->y2-gradient_vector->y1;
length=sqrt(point.x*point.x+point.y*point.y);
bounding_box=gradient->bounding_box;
status=MagickTrue;
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
{
double
alpha,
offset;
PixelInfo
composite,
pixel;
register Quantum
*magick_restrict q;
register ssize_t
i,
x;
ssize_t
j;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
composite=zero;
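    /*
      A linear-gradient offset is in pixels along the gradient vector;
      normalize it to [0,1] by the vector length.
    */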
offset=GetStopColorOffset(gradient,0,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
{
GetPixelInfoPixel(image,q,&pixel);
switch (gradient->spread)
{
case UndefinedSpread:
case PadSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if ((offset < 0.0) || (i == 0))
composite=gradient->stops[0].color;
else
if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
composite=gradient->stops[gradient->number_stops-1].color;
else
{
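              /*
                Linearly interpolate between the two stops that bracket the
                offset.
              */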
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case ReflectSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
if (offset < 0.0)
offset=(-offset);
if ((ssize_t) fmod(offset,2.0) == 0)
offset=fmod(offset,1.0);
else
offset=1.0-fmod(offset,1.0);
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case RepeatSpread:
{
double
repeat;
MagickBooleanType
antialias;
antialias=MagickFalse;
repeat=0.0;
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type == LinearGradient)
{
repeat=fmod(offset,length);
if (repeat < 0.0)
repeat=length-fmod(-repeat,length);
else
repeat=fmod(offset,length);
antialias=(repeat < length) && ((repeat+1.0) > length) ?
MagickTrue : MagickFalse;
offset=PerceptibleReciprocal(length)*repeat;
}
else
{
repeat=fmod(offset,gradient->radius);
if (repeat < 0.0)
repeat=gradient->radius-fmod(-repeat,gradient->radius);
else
repeat=fmod(offset,gradient->radius);
antialias=repeat+1.0 > gradient->radius ? MagickTrue :
MagickFalse;
offset=repeat/gradient->radius;
}
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
if (antialias != MagickFalse)
{
if (gradient->type == LinearGradient)
alpha=length-repeat;
else
alpha=gradient->radius-repeat;
i=0;
j=(ssize_t) gradient->number_stops-1L;
}
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
}
CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
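%  For example (a hedged sketch; image_info is a hypothetical ImageInfo):
%
%    draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
%    (void) CloneString(&draw_info->primitive,"circle 100,100 150,150");
%    status=DrawImage(image,draw_info,exception);
%    draw_info=DestroyDrawInfo(draw_info);
%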
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
const size_t pad)
{
double
extent;
size_t
quantum;
/*
    Check if there is enough storage for drawing primitives.
*/
extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
quantum=sizeof(**mvg_info->primitive_info);
if (((extent*quantum) < (double) SSIZE_MAX) &&
((extent*quantum) < (double) GetMaxMemoryRequest()))
{
if (extent <= (double) *mvg_info->extent)
return(MagickTrue);
*mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
*mvg_info->primitive_info,(size_t) extent,quantum);
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
{
register ssize_t
i;
*mvg_info->extent=(size_t) extent;
for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
(*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
return(MagickTrue);
}
}
/*
Reallocation failed, allocate a primitive to facilitate unwinding.
*/
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
*mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
*mvg_info->primitive_info);
*mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
PrimitiveExtentPad*quantum);
(void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
*mvg_info->extent=1;
return(MagickFalse);
}
MagickExport int MVGMacroCompare(const void *target,const void *source)
{
const char
*p,
*q;
p=(const char *) target;
q=(const char *) source;
return(strcmp(p,q));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
char
*macro,
*token;
const char
*q;
size_t
extent;
SplayTreeInfo
*macros;
/*
Scan graphic primitives for definitions and classes.
*/
if (primitive == (const char *) NULL)
return((SplayTreeInfo *) NULL);
macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
RelinquishMagickMemory);
macro=AcquireString(primitive);
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
for (q=primitive; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (*token == '\0')
break;
if (LocaleCompare("push",token) == 0)
{
register const char
*end,
*start;
(void) GetNextToken(q,&q,extent,token);
if (*q == '"')
{
char
name[MagickPathExtent];
const char
*p;
ssize_t
n;
/*
Named macro (e.g. push graphic-context "wheel").
*/
(void) GetNextToken(q,&q,extent,token);
start=q;
end=q;
(void) CopyMagickString(name,token,MagickPathExtent);
n=1;
for (p=q; *p != '\0'; )
{
if (GetNextToken(p,&p,extent,token) < 1)
break;
if (*token == '\0')
break;
if (LocaleCompare(token,"pop") == 0)
{
end=p-strlen(token)-1;
n--;
}
if (LocaleCompare(token,"push") == 0)
n++;
if ((n == 0) && (end > start))
{
/*
Extract macro.
*/
(void) GetNextToken(p,&p,extent,token);
(void) CopyMagickString(macro,start,(size_t) (end-start));
(void) AddValueToSplayTree(macros,ConstantString(name),
ConstantString(macro));
break;
}
}
}
}
}
token=DestroyString(token);
macro=DestroyString(macro);
return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
char
*p;
double
value;
value=StringToDouble(point,&p);
return((fabs(value) < MagickEpsilon) && (p == point) ? MagickFalse :
MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
const PointInfo point)
{
primitive_info->coordinates=1;
primitive_info->closed_subpath=MagickFalse;
primitive_info->point=point;
return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(MagickFalse);
}
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if (mvg_class != (const char *) NULL)
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
                GetAffineMatrix(&current);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->fill_alpha*=opacity;
else
graphic_context[n]->fill_alpha=QuantumRange*opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (IsPoint(token) == MagickFalse)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
StringToDouble(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
{
graphic_context[n]->fill_alpha*=opacity;
graphic_context[n]->stroke_alpha*=opacity;
}
else
{
graphic_context[n]->fill_alpha=QuantumRange*opacity;
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
}
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
(void) GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
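                /*
                  Count the dash values, then allocate and parse a
                  zero-terminated dash pattern; an odd count of values is
                  duplicated to an even count, as in SVG.
                */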
r=q;
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->stroke_alpha*=opacity;
else
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
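      /*
        Compose the new affine with the current transform (matrix product
        current*affine, so the new affine is applied first).
      */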
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
if ((primitive_info[j].primitive == TextPrimitive) ||
(primitive_info[j].primitive == ImagePrimitive))
if (primitive_info[j].text != (char *) NULL)
primitive_info[j].text=DestroyString(primitive_info[j].text);
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
break;
}
default:
break;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates == 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
ExceptionInfo *exception)
{
return(RenderMVGContent(image,draw_info,0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() renders the MVG path registered as an image artifact
% under the given pattern name into a newly acquired pattern image.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: the rendered pattern image, acquired by this method.
%
% o exception: return any errors or warnings in this structure.
%
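% A minimal usage sketch (the "checker" artifact names are hypothetical;
% the caller must have set both "checker" and "checker-geometry" on the
% image beforehand):
%
%   Image
%     *pattern = (Image *) NULL;
%
%   if (DrawPatternPath(image,draw_info,"checker",&pattern,exception) != MagickFalse)
%     pattern=DestroyImage(pattern);
%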
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
const DrawInfo *draw_info,const char *name,Image **pattern,
ExceptionInfo *exception)
{
char
property[MagickPathExtent];
const char
*geometry,
*path,
*type;
DrawInfo
*clone_info;
ImageInfo
*image_info;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
assert(name != (const char *) NULL);
(void) FormatLocaleString(property,MagickPathExtent,"%s",name);
path=GetImageArtifact(image,property);
if (path == (const char *) NULL)
return(MagickFalse);
(void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
geometry=GetImageArtifact(image,property);
if (geometry == (const char *) NULL)
return(MagickFalse);
if ((*pattern) != (Image *) NULL)
*pattern=DestroyImage(*pattern);
image_info=AcquireImageInfo();
image_info->size=AcquireString(geometry);
*pattern=AcquireImage(image_info,exception);
image_info=DestroyImageInfo(image_info);
(void) QueryColorCompliance("#00000000",AllCompliance,
&(*pattern)->background_color,exception);
(void) SetImageBackgroundColor(*pattern,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"begin pattern-path %s %s",name,geometry);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill_pattern=NewImageList();
clone_info->stroke_pattern=NewImageList();
(void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
type=GetImageArtifact(image,property);
if (type != (const char *) NULL)
clone_info->gradient.type=(GradientType) ParseCommandOption(
MagickGradientOptions,MagickFalse,type);
(void) CloneString(&clone_info->primitive,path);
status=RenderMVGContent(*pattern,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
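% The polygon is rasterized one scanline row at a time;
% AcquirePolygonThreadSet() builds a private sorted-edge PolygonInfo per
% thread so that rows can be processed in parallel.
%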
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
register ssize_t
i;
assert(polygon_info != (PolygonInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (polygon_info[i] != (PolygonInfo *) NULL)
polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
const ssize_t y,double *stroke_alpha)
{
double
alpha,
beta,
distance,
subpath_alpha;
PointInfo
delta;
register const PointInfo
*q;
register EdgeInfo
*p;
register ssize_t
i;
ssize_t
j,
winding_number;
/*
Compute fill & stroke opacity for this (x,y) point.
*/
*stroke_alpha=0.0;
subpath_alpha=0.0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= (p->bounds.y1-mid-0.5))
break;
if ((double) y > (p->bounds.y2+mid+0.5))
{
(void) DestroyEdge(polygon_info,(size_t) j);
continue;
}
if (((double) x <= (p->bounds.x1-mid-0.5)) ||
((double) x > (p->bounds.x2+mid+0.5)))
continue;
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) p->number_points; i++)
{
if ((double) y <= (p->points[i-1].y-mid-0.5))
break;
if ((double) y > (p->points[i].y+mid+0.5))
continue;
if (p->scanline != (double) y)
{
p->scanline=(double) y;
p->highwater=(size_t) i;
}
/*
Compute distance between a point and an edge.
*/
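/*
beta is the dot product of the edge vector with the offset of (x,y) from
the edge start: beta <= 0 means the start vertex is nearest, beta >=
|edge|^2 means the end vertex is nearest; otherwise the squared
perpendicular distance follows from the cross product below.
*/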
q=p->points+i-1;
delta.x=(q+1)->x-q->x;
delta.y=(q+1)->y-q->y;
beta=delta.x*(x-q->x)+delta.y*(y-q->y);
if (beta <= 0.0)
{
delta.x=(double) x-q->x;
delta.y=(double) y-q->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=delta.x*delta.x+delta.y*delta.y;
if (beta >= alpha)
{
delta.x=(double) x-(q+1)->x;
delta.y=(double) y-(q+1)->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=PerceptibleReciprocal(alpha);
beta=delta.x*(y-q->y)-delta.y*(x-q->x);
distance=alpha*beta*beta;
}
}
/*
Compute stroke & subpath opacity.
*/
beta=0.0;
if (p->ghostline == MagickFalse)
{
alpha=mid+0.5;
if ((*stroke_alpha < 1.0) &&
(distance <= ((alpha+0.25)*(alpha+0.25))))
{
alpha=mid-0.5;
if (distance <= ((alpha+0.25)*(alpha+0.25)))
*stroke_alpha=1.0;
else
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt((double) distance);
alpha=beta-mid-0.5;
if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
*stroke_alpha=(alpha-0.25)*(alpha-0.25);
}
}
}
if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
continue;
if (distance <= 0.0)
{
subpath_alpha=1.0;
continue;
}
if (distance > 1.0)
continue;
if (fabs(beta) < MagickEpsilon)
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt(distance);
}
alpha=beta-1.0;
if (subpath_alpha < (alpha*alpha))
subpath_alpha=alpha*alpha;
}
}
/*
Compute fill opacity.
*/
if (fill == MagickFalse)
return(0.0);
if (subpath_alpha >= 1.0)
return(1.0);
/*
Determine winding number.
*/
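/*
Sum signed crossings of the edges against a horizontal ray through (x,y):
an edge wholly to the left of x counts its direction outright; an edge
straddling x counts only when (x,y) lies on the counted side of the
directed segment at height y, as decided by the cross-product test.
*/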
winding_number=0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= p->bounds.y1)
break;
if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
continue;
if ((double) x > p->bounds.x2)
{
winding_number+=p->direction ? 1 : -1;
continue;
}
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) (p->number_points-1); i++)
if ((double) y <= p->points[i].y)
break;
q=p->points+i-1;
if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
winding_number+=p->direction ? 1 : -1;
}
if (fill_rule != NonZeroRule)
{
if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
return(1.0);
}
else
if (MagickAbsoluteValue(winding_number) != 0)
return(1.0);
return(subpath_alpha);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
fill,
status;
double
mid;
PolygonInfo
**magick_restrict polygon_info;
register EdgeInfo
*p;
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
start_y,
stop_y,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
assert(primitive_info != (PrimitiveInfo *) NULL);
if (primitive_info->coordinates <= 1)
return(MagickTrue);
/*
Compute bounding box.
*/
polygon_info=AcquirePolygonThreadSet(primitive_info);
if (polygon_info == (PolygonInfo **) NULL)
return(MagickFalse);
DisableMSCWarning(4127)
if (0)
{
status=DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
if (status == MagickFalse)
{
polygon_info=DestroyPolygonThreadSet(polygon_info);
return(status);
}
}
RestoreMSCWarning
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
fill=(primitive_info->method == FillToBorderMethod) ||
(primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
bounds=polygon_info[0]->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
{
p=polygon_info[0]->edges+i;
if (p->bounds.x1 < bounds.x1)
bounds.x1=p->bounds.x1;
if (p->bounds.y1 < bounds.y1)
bounds.y1=p->bounds.y1;
if (p->bounds.x2 > bounds.x2)
bounds.x2=p->bounds.x2;
if (p->bounds.y2 > bounds.y2)
bounds.y2=p->bounds.y2;
}
bounds.x1-=(mid+1.0);
bounds.y1-=(mid+1.0);
bounds.x2+=(mid+1.0);
bounds.y2+=(mid+1.0);
if ((bounds.x1 >= (double) image->columns) ||
(bounds.y1 >= (double) image->rows) ||
(bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
{
polygon_info=DestroyPolygonThreadSet(polygon_info);
return(MagickTrue); /* virtual polygon */
}
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x1;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y1;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x2;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y2;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
if ((primitive_info->coordinates == 1) ||
(polygon_info[0]->number_edges == 0))
{
/*
Draw point.
*/
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
register ssize_t
x;
register Quantum
*magick_restrict q;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
x=start_x;
q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for ( ; x <= stop_x; x++)
{
if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
(y == (ssize_t) ceil(primitive_info->point.y-0.5)))
{
GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
}
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-polygon");
return(status);
}
/*
Draw polygon or line.
*/
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
const int
id = GetOpenMPThreadId();
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
1),1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=start_x; x <= stop_x; x++)
{
double
fill_alpha,
stroke_alpha;
PixelInfo
fill_color,
stroke_color;
/*
Fill and/or stroke.
*/
fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
x,y,&stroke_alpha);
if (draw_info->stroke_antialias == MagickFalse)
{
fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
}
GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
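% A minimal usage sketch (hypothetical values; DrawImage() normally builds
% the primitive array from an MVG string):
%
%   PrimitiveInfo
%     primitives[2];
%
%   (void) memset(primitives,0,sizeof(primitives));
%   primitives[0].primitive=PointPrimitive;
%   primitives[0].coordinates=1;
%   primitives[0].point.x=10.0;
%   primitives[0].point.y=20.0;
%   primitives[1].primitive=UndefinedPrimitive;
%   status=DrawPrimitive(image,draw_info,primitives,exception);
%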
*/
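/*
Clamp a coordinate so that later casts to ssize_t cannot overflow; the 512
margin leaves headroom for rounding and small offsets.
*/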
static inline double ConstrainCoordinate(double x)
{
if (x < (double) -(SSIZE_MAX-512))
return((double) -(SSIZE_MAX-512));
if (x > (double) (SSIZE_MAX-512))
return((double) (SSIZE_MAX-512));
return(x);
}
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
const char
*methods[] =
{
"point",
"replace",
"floodfill",
"filltoborder",
"reset",
"?"
};
PointInfo
p,
point,
q;
register ssize_t
i,
x;
ssize_t
coordinates,
y;
x=(ssize_t) ceil(primitive_info->point.x-0.5);
y=(ssize_t) ceil(primitive_info->point.y-0.5);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ColorPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ImagePrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ImagePrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
case PointPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case TextPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"TextPrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
default:
break;
}
coordinates=0;
p=primitive_info[0].point;
q.x=(-1.0);
q.y=(-1.0);
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
if (coordinates <= 0)
{
coordinates=(ssize_t) primitive_info[i].coordinates;
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin open (%.20g)",(double) coordinates);
p=point;
}
point=primitive_info[i].point;
if ((fabs(q.x-point.x) >= MagickEpsilon) ||
(fabs(q.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
q=point;
coordinates--;
if (coordinates > 0)
continue;
if ((fabs(p.x-point.x) >= MagickEpsilon) ||
(fabs(p.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
(double) coordinates);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
(double) coordinates);
}
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickStatusType
status;
register ssize_t
i,
x;
ssize_t
y;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-primitive");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
draw_info->affine.tx,draw_info->affine.ty);
}
status=MagickTrue;
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
(IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
status&=SetImageColorspace(image,sRGBColorspace,exception);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
exception);
status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
exception);
}
x=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x-0.5));
y=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y-0.5));
image_view=AcquireAuthenticCacheView(image,exception);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
if (image->alpha_trait == UndefinedPixelTrait)
status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
PixelInfo
pixel,
target;
status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
ChannelType
channel_mask;
PixelInfo
target;
status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
(void) SetImageChannelMask(image,channel_mask);
break;
}
case ResetMethod:
{
PixelInfo
pixel;
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
}
break;
}
case ColorPrimitive:
{
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetPixelInfo(image,&pixel);
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
PixelInfo
pixel,
target;
status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
PixelInfo
target;
status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
break;
}
case ResetMethod:
{
PixelInfo
pixel;
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
}
break;
}
case ImagePrimitive:
{
AffineMatrix
affine;
char
composite_geometry[MagickPathExtent];
Image
*composite_image,
*composite_images;
ImageInfo
*clone_info;
RectangleInfo
geometry;
ssize_t
x1,
y1;
if (primitive_info->text == (char *) NULL)
break;
clone_info=AcquireImageInfo();
composite_images=(Image *) NULL;
if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
composite_images=ReadInlineImage(clone_info,primitive_info->text,
exception);
else
if (*primitive_info->text != '\0')
{
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MagickPathExtent);
composite_images=ReadImage(clone_info,exception);
}
clone_info=DestroyImageInfo(clone_info);
if (composite_images == (Image *) NULL)
{
status=MagickFalse;
break;
}
composite_image=RemoveFirstImageFromList(&composite_images);
composite_images=DestroyImageList(composite_images);
(void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
NULL,(void *) NULL);
x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
{
/*
Resize image.
*/
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
composite_image->filter=image->filter;
status&=TransformImage(&composite_image,(char *) NULL,
composite_geometry,exception);
}
if (composite_image->alpha_trait == UndefinedPixelTrait)
status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
exception);
if (draw_info->alpha != OpaqueAlpha)
status&=SetImageAlpha(composite_image,draw_info->alpha,exception);
SetGeometry(image,&geometry);
image->gravity=draw_info->gravity;
geometry.x=x;
geometry.y=y;
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
composite_image->rows,(double) geometry.x,(double) geometry.y);
(void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
affine=draw_info->affine;
affine.tx=(double) geometry.x;
affine.ty=(double) geometry.y;
composite_image->interpolate=image->interpolate;
if ((draw_info->compose == OverCompositeOp) ||
(draw_info->compose == SrcOverCompositeOp))
status&=DrawAffineImage(image,composite_image,&affine,exception);
else
status&=CompositeImage(image,composite_image,draw_info->compose,
MagickTrue,geometry.x,geometry.y,exception);
composite_image=DestroyImage(composite_image);
break;
}
case PointPrimitive:
{
PixelInfo
fill_color;
register Quantum
*q;
if ((y < 0) || (y >= (ssize_t) image->rows))
break;
if ((x < 0) || (x >= (ssize_t) image->columns))
break;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&fill_color,exception);
CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double)
GetPixelAlpha(image,q),q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
DrawInfo
*clone_info;
if (primitive_info->text == (char *) NULL)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->text,primitive_info->text);
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
(void) CloneString(&clone_info->geometry,geometry);
status&=AnnotateImage(image,clone_info,exception);
clone_info=DestroyDrawInfo(clone_info);
break;
}
default:
{
double
mid,
scale;
DrawInfo
*clone_info;
if (IsEventLogging() != MagickFalse)
LogPrimitiveInfo(primitive_info);
scale=ExpandAffine(&draw_info->affine);
if ((draw_info->dash_pattern != (double *) NULL) &&
(fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
(fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
(draw_info->stroke.alpha != (Quantum) TransparentAlpha))
{
/*
Draw dash polygon.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
if (status != MagickFalse)
status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
break;
}
mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
if ((mid > 1.0) &&
((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
(draw_info->stroke_pattern != (Image *) NULL)))
{
double
x,
y;
MagickBooleanType
closed_path;
/*
Draw strokes while respecting line cap/join attributes.
*/
closed_path=primitive_info[0].closed_subpath;
i=(ssize_t) primitive_info[0].coordinates;
x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
closed_path=MagickTrue;
if ((((draw_info->linecap == RoundCap) ||
(closed_path != MagickFalse)) &&
(draw_info->linejoin == RoundJoin)) ||
(primitive_info[i].primitive != UndefinedPrimitive))
{
status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
exception);
break;
}
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
if (status != MagickFalse)
status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
break;
}
status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
break;
}
}
image_view=DestroyCacheView(image_view);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
PrimitiveInfo
linecap[5];
register ssize_t
i;
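/*
Build a degenerate four-vertex polygon within 2*MagickEpsilon of the
endpoint; stroking it yields a disc of the stroke radius, which renders
the round cap.
*/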
for (i=0; i < 4; i++)
linecap[i]=(*primitive_info);
linecap[0].coordinates=4;
linecap[1].point.x+=2.0*MagickEpsilon;
linecap[2].point.x+=2.0*MagickEpsilon;
linecap[2].point.y+=2.0*MagickEpsilon;
linecap[3].point.y+=2.0*MagickEpsilon;
linecap[4].primitive=UndefinedPrimitive;
return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
DrawInfo
*clone_info;
MagickBooleanType
closed_path;
MagickStatusType
status;
PrimitiveInfo
*stroke_polygon;
register const PrimitiveInfo
*p,
*q;
/*
Draw stroked polygon.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-stroke-polygon");
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill=draw_info->stroke;
if (clone_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
if (clone_info->stroke_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
clone_info->stroke_width=0.0;
clone_info->fill_rule=NonZeroRule;
status=MagickTrue;
for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
{
if (p->coordinates == 1)
continue;
stroke_polygon=TraceStrokePolygon(image,draw_info,p);
if (stroke_polygon == (PrimitiveInfo *) NULL)
{
status=0;
break;
}
status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
if (status == 0)
break;
q=p+p->coordinates-1;
closed_path=p->closed_subpath;
if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
{
status&=DrawRoundLinecap(image,draw_info,p,exception);
status&=DrawRoundLinecap(image,draw_info,q,exception);
}
}
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-stroke-polygon");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
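% For example:
%
%   AffineMatrix
%     affine;
%
%   GetAffineMatrix(&affine);
%
% after which sx == sy == 1.0 and rx, ry, tx, and ty are 0.0.
%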
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(affine_matrix != (AffineMatrix *) NULL);
(void) memset(affine_matrix,0,sizeof(*affine_matrix));
affine_matrix->sx=1.0;
affine_matrix->sy=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
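% A usage sketch (callers more typically use AcquireDrawInfo() or
% CloneDrawInfo(), which build on this method):
%
%   DrawInfo
%     draw_info;
%
%   GetDrawInfo(image_info,&draw_info);
%   draw_info.pointsize=18.0;
%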
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
char
*next_token;
const char
*option;
ExceptionInfo
*exception;
ImageInfo
*clone_info;
/*
Initialize draw attributes.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info != (DrawInfo *) NULL);
(void) memset(draw_info,0,sizeof(*draw_info));
clone_info=CloneImageInfo(image_info);
GetAffineMatrix(&draw_info->affine);
exception=AcquireExceptionInfo();
(void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
exception);
(void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
exception);
draw_info->stroke_antialias=clone_info->antialias;
draw_info->stroke_width=1.0;
draw_info->fill_rule=EvenOddRule;
draw_info->alpha=OpaqueAlpha;
draw_info->fill_alpha=OpaqueAlpha;
draw_info->stroke_alpha=OpaqueAlpha;
draw_info->linecap=ButtCap;
draw_info->linejoin=MiterJoin;
draw_info->miterlimit=10;
draw_info->decorate=NoDecoration;
draw_info->pointsize=12.0;
draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
draw_info->compose=OverCompositeOp;
draw_info->render=MagickTrue;
draw_info->clip_path=MagickFalse;
draw_info->debug=IsEventLogging();
if (clone_info->font != (char *) NULL)
draw_info->font=AcquireString(clone_info->font);
if (clone_info->density != (char *) NULL)
draw_info->density=AcquireString(clone_info->density);
draw_info->text_antialias=clone_info->antialias;
if (fabs(clone_info->pointsize) >= MagickEpsilon)
draw_info->pointsize=clone_info->pointsize;
draw_info->border_color=clone_info->border_color;
if (clone_info->server_name != (char *) NULL)
draw_info->server_name=AcquireString(clone_info->server_name);
option=GetImageOption(clone_info,"direction");
if (option != (const char *) NULL)
draw_info->direction=(DirectionType) ParseCommandOption(
MagickDirectionOptions,MagickFalse,option);
else
draw_info->direction=UndefinedDirection;
option=GetImageOption(clone_info,"encoding");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->encoding,option);
option=GetImageOption(clone_info,"family");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->family,option);
option=GetImageOption(clone_info,"fill");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
exception);
option=GetImageOption(clone_info,"gravity");
if (option != (const char *) NULL)
draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"interline-spacing");
if (option != (const char *) NULL)
draw_info->interline_spacing=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"interword-spacing");
if (option != (const char *) NULL)
draw_info->interword_spacing=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"kerning");
if (option != (const char *) NULL)
draw_info->kerning=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"stroke");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
exception);
option=GetImageOption(clone_info,"strokewidth");
if (option != (const char *) NULL)
draw_info->stroke_width=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"style");
if (option != (const char *) NULL)
draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"undercolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
exception);
option=GetImageOption(clone_info,"weight");
if (option != (const char *) NULL)
{
ssize_t
weight;
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(option);
draw_info->weight=(size_t) weight;
}
exception=DestroyExceptionInfo(exception);
draw_info->signature=MagickCoreSignature;
clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the binomial coefficient: the number of ways to
% choose k of n items.
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n: the number of items.
%
% o k: the number of items chosen.
%
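% For example, Permutate(5,2) evaluates (3*4*5)/(1*2*3) = 10, the number of
% ways to choose 2 of 5 items.
%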
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
double
r;
register ssize_t
i;
r=1.0;
for (i=k+1; i <= n; i++)
r*=i;
for (i=1; i <= (n-k); i++)
r/=i;
return(r);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo degrees)
{
PointInfo
center,
radius;
center.x=0.5*(end.x+start.x);
center.y=0.5*(end.y+start.y);
radius.x=fabs(center.x-start.x);
radius.y=fabs(center.y-start.y);
return(TraceEllipse(mvg_info,center,radius,degrees));
}
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo arc,const double angle,
const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
double
alpha,
beta,
delta,
factor,
gamma,
theta;
MagickStatusType
status;
PointInfo
center,
points[3],
radii;
register double
cosine,
sine;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
size_t
arc_segments;
ssize_t
offset;
offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
return(TracePoint(primitive_info,end));
radii.x=fabs(arc.x);
radii.y=fabs(arc.y);
if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
return(TraceLine(primitive_info,start,end));
cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
(radii.y*radii.y);
if (delta < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
if (delta > 1.0)
{
radii.x*=sqrt((double) delta);
radii.y*=sqrt((double) delta);
}
points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
alpha=points[1].x-points[0].x;
beta=points[1].y-points[0].y;
if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
if (factor <= 0.0)
factor=0.0;
else
{
factor=sqrt((double) factor);
if (sweep == large_arc)
factor=(-factor);
}
center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
alpha=atan2(points[0].y-center.y,points[0].x-center.x);
theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
if ((theta < 0.0) && (sweep != MagickFalse))
theta+=2.0*MagickPI;
else
if ((theta > 0.0) && (sweep == MagickFalse))
theta-=2.0*MagickPI;
arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
MagickEpsilon))));
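/*
Approximate the arc with one cubic Bezier per segment of at most a quarter
turn; gamma below is the classic control-point scale, equal to
(4/3)*tan(phi/4) for a segment sweep of phi.
*/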
status=MagickTrue;
p=primitive_info;
for (i=0; i < (ssize_t) arc_segments; i++)
{
beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
sin(fmod((double) beta,DegreesToRadians(360.0)));
points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
(p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
points[0].y);
(p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
points[0].y);
(p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
points[1].y);
(p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
points[1].y);
(p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
points[2].y);
(p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
points[2].y);
if (i == (ssize_t) (arc_segments-1))
(p+3)->point=end;
status&=TraceBezier(mvg_info,4);
if (status == 0)
break;
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
p+=p->coordinates;
}
if (status == 0)
return(MagickFalse);
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
const size_t number_coordinates)
{
double
alpha,
*coefficients,
weight;
PointInfo
end,
point,
*points;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i,
j;
size_t
control_points,
quantum;
/*
Allocate coefficients.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=number_coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
for (j=i+1; j < (ssize_t) number_coordinates; j++)
{
alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
}
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
coefficients=(double *) AcquireQuantumMemory(number_coordinates,
sizeof(*coefficients));
points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
sizeof(*points));
if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
{
if (points != (PointInfo *) NULL)
points=(PointInfo *) RelinquishMagickMemory(points);
if (coefficients != (double *) NULL)
coefficients=(double *) RelinquishMagickMemory(coefficients);
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
control_points=quantum*number_coordinates;
if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
/*
Compute bezier points.
*/
end=primitive_info[number_coordinates-1].point;
for (i=0; i < (ssize_t) number_coordinates; i++)
coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
weight=0.0;
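/*
Evaluate the Bernstein form: each sample is the sum over j of
C(n-1,j)*w^j*(1-w)^(n-1-j)*P[j]; alpha starts at (1-w)^(n-1) and is
rescaled by w/(1-w) to step between successive terms.
*/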
for (i=0; i < (ssize_t) control_points; i++)
{
p=primitive_info;
point.x=0.0;
point.y=0.0;
alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
for (j=0; j < (ssize_t) number_coordinates; j++)
{
point.x+=alpha*coefficients[j]*p->point.x;
point.y+=alpha*coefficients[j]*p->point.y;
alpha*=weight/(1.0-weight);
p++;
}
points[i]=point;
weight+=1.0/control_points;
}
/*
Bezier curves are just short segmented polys.
*/
p=primitive_info;
for (i=0; i < (ssize_t) control_points; i++)
{
if (TracePoint(p,points[i]) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
}
if (TracePoint(p,end) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickTrue);
}
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end)
{
double
alpha,
beta,
radius;
PointInfo
offset,
degrees;
alpha=end.x-start.x;
beta=end.y-start.y;
radius=hypot((double) alpha,(double) beta);
offset.x=(double) radius;
offset.y=(double) radius;
degrees.x=0.0;
degrees.y=360.0;
return(TraceEllipse(mvg_info,start,offset,degrees));
}
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
const PointInfo radii,const PointInfo arc)
{
double
coordinates,
delta,
step,
x,
y;
PointInfo
angle,
point;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
/*
Ellipses are just short segmented polys.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
return(MagickTrue);
delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
step=MagickPI/8.0;
if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
angle.x=DegreesToRadians(arc.x);
y=arc.y;
while (y < arc.x)
y+=360.0;
angle.y=DegreesToRadians(y);
coordinates=ceil((angle.y-angle.x)/step+1.0);
if (coordinates > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
return(MagickFalse);
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
for (p=primitive_info; angle.x < angle.y; angle.x+=step)
{
point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
}
point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
x=fabs(primitive_info[0].point.x-
primitive_info[primitive_info->coordinates-1].point.x);
y=fabs(primitive_info[0].point.y-
primitive_info[primitive_info->coordinates-1].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end)
{
if (TracePoint(primitive_info,start) == MagickFalse)
return(MagickFalse);
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
{
primitive_info->primitive=PointPrimitive;
primitive_info->coordinates=1;
return(MagickTrue);
}
if (TracePoint(primitive_info+1,end) == MagickFalse)
return(MagickFalse);
(primitive_info+1)->primitive=primitive_info->primitive;
primitive_info->coordinates=2;
primitive_info->closed_subpath=MagickFalse;
return(MagickTrue);
}
static size_t TracePath(MVGInfo *mvg_info,const char *path,
ExceptionInfo *exception)
{
char
*next_token,
token[MagickPathExtent];
const char
*p;
double
x,
y;
int
attribute,
last_attribute;
MagickBooleanType
status;
PointInfo
end = {0.0, 0.0},
points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
point = {0.0, 0.0},
start = {0.0, 0.0};
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register PrimitiveInfo
*q;
register ssize_t
i;
size_t
number_coordinates,
z_count;
ssize_t
subpath_offset;
subpath_offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
status=MagickTrue;
attribute=0;
number_coordinates=0;
z_count=0;
primitive_type=primitive_info->primitive;
q=primitive_info;
for (p=path; *p != '\0'; )
{
if (status == MagickFalse)
break;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == '\0')
break;
last_attribute=attribute;
attribute=(int) (*p++);
switch (attribute)
{
case 'a':
case 'A':
{
double
angle = 0.0;
MagickBooleanType
large_arc = MagickFalse,
sweep = MagickFalse;
PointInfo
arc = {0.0, 0.0};
/*
Elliptical arc.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
arc.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
arc.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'c':
case 'C':
{
/*
Cubic Bézier curve.
*/
do
{
points[0]=point;
for (i=1; i < 4; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'H':
case 'h':
{
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'l':
case 'L':
{
/*
Line to.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'M':
case 'm':
{
/*
Move to.
*/
if (mvg_info->offset != subpath_offset)
{
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
}
i=0;
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
if (i == 0)
start=point;
i++;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'q':
case 'Q':
{
/*
Quadratic Bézier curve.
*/
do
{
points[0]=point;
for (i=1; i < 3; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 's':
case 'S':
{
/*
Cubic Bézier curve.
*/
do
{
points[0]=points[3];
points[1].x=2.0*points[3].x-points[2].x;
points[1].y=2.0*points[3].y-points[2].y;
for (i=2; i < 4; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
points[i]=end;
}
if (strchr("CcSs",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
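      /*
        Smooth curve variants: for S/s (above) and T/t (below) the first
        control point is the reflection of the previous curve's last control
        point about the current point (2*P_end-P_ctrl); when the preceding
        command is not a curve of the matching family, the leading control
        points collapse to the current point instead.
      */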
case 't':
case 'T':
{
/*
Quadratic Bézier curve.
*/
do
{
points[0]=points[2];
points[1].x=2.0*points[2].x-points[1].x;
points[1].y=2.0*points[2].y-points[1].y;
for (i=2; i < 3; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
points[i]=end;
}
if (status == MagickFalse)
break;
if (strchr("QqTt",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'v':
case 'V':
{
/*
Line to.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'z':
case 'Z':
{
/*
Close path.
*/
point=start;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
primitive_info->closed_subpath=MagickTrue;
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
z_count++;
break;
}
default:
{
ThrowPointExpectedException(token,exception);
break;
}
}
}
if (status == MagickFalse)
return(0);
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
q--;
q->primitive=primitive_type;
if (z_count > 1)
q->method=FillToBorderMethod;
}
q=primitive_info;
return(number_coordinates);
}
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end)
{
PointInfo
point;
register PrimitiveInfo
*p;
register ssize_t
i;
p=primitive_info;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=start.x;
point.y=end.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,end) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=end.x;
point.y=start.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
const PointInfo start,const PointInfo end,PointInfo arc)
{
PointInfo
degrees,
point,
segment;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
ssize_t
offset;
offset=mvg_info->offset;
segment.x=fabs(end.x-start.x);
segment.y=fabs(end.y-start.y);
if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
{
(*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
return(MagickTrue);
}
if (arc.x > (0.5*segment.x))
arc.x=0.5*segment.x;
if (arc.y > (0.5*segment.y))
arc.y=0.5*segment.y;
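  /*
    Trace four quarter ellipses, one per corner (270-360, 0-90, 90-180, and
    180-270 degrees), each centered arc.x/arc.y inside its corner, then close
    the path back to the first traced point.
  */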
point.x=start.x+segment.x-arc.x;
point.y=start.y+arc.y;
degrees.x=270.0;
degrees.y=360.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+segment.x-arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=0.0;
degrees.y=90.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=90.0;
degrees.y=180.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+arc.y;
degrees.x=180.0;
degrees.y=270.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
const size_t number_vertices,const double offset)
{
double
distance;
register double
dx,
dy;
register ssize_t
i;
ssize_t
j;
dx=0.0;
dy=0.0;
for (i=1; i < (ssize_t) number_vertices; i++)
{
dx=primitive_info[0].point.x-primitive_info[i].point.x;
dy=primitive_info[0].point.y-primitive_info[i].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
if (i == (ssize_t) number_vertices)
i=(ssize_t) number_vertices-1L;
distance=hypot((double) dx,(double) dy);
primitive_info[0].point.x=(double) (primitive_info[i].point.x+
dx*(distance+offset)/distance);
primitive_info[0].point.y=(double) (primitive_info[i].point.y+
dy*(distance+offset)/distance);
for (j=(ssize_t) number_vertices-2; j >= 0; j--)
{
dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
distance=hypot((double) dx,(double) dy);
primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
dx*(distance+offset)/distance);
primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
dy*(distance+offset)/distance);
return(MagickTrue);
}
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
#define MaxStrokePad (6*BezierQuantum+360)
#define CheckPathExtent(pad_p,pad_q) \
{ \
if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \
{ \
if (~extent_p < (pad_p)) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
else \
{ \
extent_p+=(pad_p); \
stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \
MaxStrokePad,sizeof(*stroke_p)); \
} \
} \
if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \
{ \
if (~extent_q < (pad_q)) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
else \
{ \
extent_q+=(pad_q); \
stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \
MaxStrokePad,sizeof(*stroke_q)); \
} \
} \
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \
{ \
if (stroke_p != (PointInfo *) NULL) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
if (stroke_q != (PointInfo *) NULL) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
polygon_primitive=(PrimitiveInfo *) \
RelinquishMagickMemory(polygon_primitive); \
return((PrimitiveInfo *) NULL); \
} \
}
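  /*
    In CheckPathExtent above, "~extent_p < (pad_p)" guards against overflow:
    for a size_t, ~extent_p equals SIZE_MAX-extent_p, so the test fires
    exactly when extent_p+(pad_p) would wrap; the buffer is then released and
    the trailing NULL check reports the failure.
  */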
typedef struct _StrokeSegment
{
double
p,
q;
} StrokeSegment;
double
delta_theta,
dot_product,
mid,
miterlimit;
MagickBooleanType
closed_path;
PointInfo
box_p[5],
box_q[5],
center,
offset,
*stroke_p,
*stroke_q;
PrimitiveInfo
*polygon_primitive,
*stroke_polygon;
register ssize_t
i;
size_t
arc_segments,
extent_p,
extent_q,
number_vertices;
ssize_t
j,
n,
p,
q;
StrokeSegment
dx = {0.0, 0.0},
dy = {0.0, 0.0},
inverse_slope = {0.0, 0.0},
slope = {0.0, 0.0},
theta = {0.0, 0.0};
/*
Allocate paths.
*/
number_vertices=primitive_info->coordinates;
polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
number_vertices+2UL,sizeof(*polygon_primitive));
if (polygon_primitive == (PrimitiveInfo *) NULL)
return((PrimitiveInfo *) NULL);
(void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
sizeof(*polygon_primitive));
closed_path=primitive_info[0].closed_subpath;
if (((draw_info->linejoin == RoundJoin) ||
(draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
{
polygon_primitive[number_vertices]=primitive_info[1];
number_vertices++;
}
polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
/*
Compute the slope for the first line segment, p.
*/
dx.p=0.0;
dy.p=0.0;
for (n=1; n < (ssize_t) number_vertices; n++)
{
dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
break;
}
if (n == (ssize_t) number_vertices)
{
if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
{
/*
Zero length subpath.
*/
stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
sizeof(*stroke_polygon));
stroke_polygon[0]=polygon_primitive[0];
stroke_polygon[0].coordinates=0;
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return(stroke_polygon);
}
n=(ssize_t) number_vertices-1L;
}
extent_p=2*number_vertices;
extent_q=2*number_vertices;
stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad,
sizeof(*stroke_p));
stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad,
sizeof(*stroke_q));
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL))
{
if (stroke_p != (PointInfo *) NULL)
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
if (stroke_q != (PointInfo *) NULL)
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *)
RelinquishMagickMemory(polygon_primitive);
return((PrimitiveInfo *) NULL);
}
slope.p=0.0;
inverse_slope.p=0.0;
if (fabs(dx.p) < MagickEpsilon)
{
if (dx.p >= 0.0)
slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.p) < MagickEpsilon)
{
if (dy.p >= 0.0)
inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.p=dy.p/dx.p;
inverse_slope.p=(-1.0/slope.p);
}
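  /*
    The branches above keep slope.p and inverse_slope.p finite: a nearly
    vertical segment is clamped to slope.p = +/-1/MagickEpsilon and a nearly
    horizontal one to inverse_slope.p = +/-1/MagickEpsilon, with the sign
    following the segment direction, rather than dividing by a near-zero
    dx.p or dy.p.
  */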
mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
(void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
offset.y=(double) (offset.x*inverse_slope.p);
if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
{
box_p[0].x=polygon_primitive[0].point.x-offset.x;
box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
box_p[1].x=polygon_primitive[n].point.x-offset.x;
box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
box_q[0].x=polygon_primitive[0].point.x+offset.x;
box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
box_q[1].x=polygon_primitive[n].point.x+offset.x;
box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
}
else
{
box_p[0].x=polygon_primitive[0].point.x+offset.x;
box_p[0].y=polygon_primitive[0].point.y+offset.y;
box_p[1].x=polygon_primitive[n].point.x+offset.x;
box_p[1].y=polygon_primitive[n].point.y+offset.y;
box_q[0].x=polygon_primitive[0].point.x-offset.x;
box_q[0].y=polygon_primitive[0].point.y-offset.y;
box_q[1].x=polygon_primitive[n].point.x-offset.x;
box_q[1].y=polygon_primitive[n].point.y-offset.y;
}
/*
Create strokes for the line join attribute: bevel, miter, round.
*/
p=0;
q=0;
stroke_q[p++]=box_q[0];
stroke_p[q++]=box_p[0];
for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
{
/*
Compute the slope for this line segment, q.
*/
dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
dot_product=dx.q*dx.q+dy.q*dy.q;
if (dot_product < 0.25)
continue;
slope.q=0.0;
inverse_slope.q=0.0;
if (fabs(dx.q) < MagickEpsilon)
{
if (dx.q >= 0.0)
slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.q) < MagickEpsilon)
{
if (dy.q >= 0.0)
inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.q=dy.q/dx.q;
inverse_slope.q=(-1.0/slope.q);
}
offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
offset.y=(double) (offset.x*inverse_slope.q);
dot_product=dy.q*offset.x-dx.q*offset.y;
if (dot_product > 0.0)
{
box_p[2].x=polygon_primitive[n].point.x-offset.x;
box_p[2].y=polygon_primitive[n].point.y-offset.y;
box_p[3].x=polygon_primitive[i].point.x-offset.x;
box_p[3].y=polygon_primitive[i].point.y-offset.y;
box_q[2].x=polygon_primitive[n].point.x+offset.x;
box_q[2].y=polygon_primitive[n].point.y+offset.y;
box_q[3].x=polygon_primitive[i].point.x+offset.x;
box_q[3].y=polygon_primitive[i].point.y+offset.y;
}
else
{
box_p[2].x=polygon_primitive[n].point.x+offset.x;
box_p[2].y=polygon_primitive[n].point.y+offset.y;
box_p[3].x=polygon_primitive[i].point.x+offset.x;
box_p[3].y=polygon_primitive[i].point.y+offset.y;
box_q[2].x=polygon_primitive[n].point.x-offset.x;
box_q[2].y=polygon_primitive[n].point.y-offset.y;
box_q[3].x=polygon_primitive[i].point.x-offset.x;
box_q[3].y=polygon_primitive[i].point.y-offset.y;
}
if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
{
box_p[4]=box_p[1];
box_q[4]=box_q[1];
}
else
{
box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
box_p[3].y)/(slope.p-slope.q));
box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
box_q[3].y)/(slope.p-slope.q));
box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
}
CheckPathExtent(MaxStrokePad,MaxStrokePad);
dot_product=dx.q*dy.p-dx.p*dy.q;
if (dot_product <= 0.0)
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
if (theta.q < theta.p)
theta.q+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
(2.0*sqrt((double) (1.0/mid)))));
CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad);
stroke_q[q].x=box_q[1].x;
stroke_q[q].y=box_q[1].y;
q++;
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_q[q].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_q[q].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
q++;
}
stroke_q[q++]=box_q[2];
break;
}
default:
break;
}
else
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
if (theta.p < theta.q)
theta.p+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
(2.0*sqrt((double) (1.0/mid)))));
CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad);
stroke_p[p++]=box_p[1];
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_p[p].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_p[p].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
p++;
}
stroke_p[p++]=box_p[2];
break;
}
default:
break;
}
slope.p=slope.q;
inverse_slope.p=inverse_slope.q;
box_p[0]=box_p[2];
box_p[1]=box_p[3];
box_q[0]=box_q[2];
box_q[1]=box_q[3];
dx.p=dx.q;
dy.p=dy.q;
n=i;
}
stroke_p[p++]=box_p[1];
stroke_q[q++]=box_q[1];
/*
Trace stroked polygon.
*/
stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
if (stroke_polygon != (PrimitiveInfo *) NULL)
{
for (i=0; i < (ssize_t) p; i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_p[i];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
}
for ( ; i < (ssize_t) (p+q+closed_path); i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
i++;
}
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
stroke_polygon[i].primitive=UndefinedPrimitive;
stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
}
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
return(stroke_polygon);
}
|
busy-cpu-and-mem.c | /*
AUTHOR : Ali Snedden
DATE : 1/24/22
PURPOSE :
1. Create a segmentation fault to test
a) enabling segfaults on a Redhat machine
#) to test `ulimit data`
#. Enabling core files :
a) Tasks
#. Ensure that `ulimit -c unlimited`
#. Ensure both abrtd and abrt-ccpp are running
* service abrtd status
* service abrt-ccpp status
#. In /etc/abrt/abrt-action-save-package-data.conf ensure
`ProcessUnpackaged = yes`
#. Run a test bad code
* Typically the file will be written /var/spool/abrt/
* If you do `cat /proc/sys/kernel/core_pattern`, you'll see some stuff
* If you `echo "core" > /proc/sys/kernel/core_pattern` it will dump
to a local file
#) Refs :
#. https://unix.stackexchange.com/a/277338/128519
#. https://stackoverflow.com/a/42272400/4021436
#. https://access.redhat.com/solutions/4896
#. https://access.redhat.com/solutions/56021
RUN:
./a.out nthreads mem_in_GB
*/
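/*
  Example usage (illustrative; exact compiler flags may differ):
    gcc -fopenmp -O2 busy-cpu-and-mem.c -lm -o a.out
    ./a.out 4 2      # 4 threads touching ~2 GB of memory total
*/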
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <sys/types.h>
#include <omp.h>
/*
ARGS :
string = char[], string to print
DESCRIPTION :
This prints an error and then exits with a non-zero error code
RETURN :
1 = int, b/c it is an error
DEBUG :
FUTURE :
*/
int exit_with_error(char * string){
  fprintf(stderr, "%s", string);
  exit(1);    /* actually terminate, as the docstring promises */
  return 1;   /* unreachable; kept so the signature matches the docs */
}
/*
ARGS :
argc = int, number of CL args
argv[] = char[], CL args
DESCRIPTION :
Spins up nthreads OpenMP threads that repeatedly write a large per-thread array, keeping the CPUs busy while holding roughly mem_in_GB of memory
RETURN :
0 = int, b/c it completed w/o error
DEBUG :
FUTURE :
1. Come up with method to check the number of elements in argv
*/
int main(int argc, char *argv[])
{
char errMsg[200]; // Used for error messages
long nThread; // Number of threads, from CL args
long nLoop = 1000; // Number of loops
long i = 0;
long j = 0;
long k = 0;
long mem = 0; // Memory in GB
long n; // Number of array elements, from CL args
long nbytes; // Total bytes
long * array = NULL; // Array to do operations
/************* Parse CL args *************/
// Check for correct number of arguments
if(argc != 3){
sprintf(errMsg, "ERROR!!! %i args passed, 3 expected (program, nthreads, mem_in_GB)\n", argc);
exit_with_error(errMsg);
}
// Number of threads
nThread = atol(argv[1]);
// Memory
mem = atol(argv[2]);
printf("Running with %ld procs and %ld GB\n", nThread, mem);
printf("pid = %ld; parentid = %ld\n", (long)getpid(), (long)getppid());
// OpenMP creates nThread copies of array, so account
nbytes = mem * 1000000000L;   // 10^9 bytes per GB, kept in integer arithmetic
n = nbytes / (nThread * sizeof(long));
// https://stackoverflow.com/a/12285433/4021436
omp_set_num_threads(nThread);
#pragma omp parallel private(i, j, array)   // j must be private too, or the threads race on it
{
long tid = omp_get_thread_num();
printf("Hello from Thread : %ld\n",tid);
// Initialize - stop compiler complaints
array = (long *)malloc(n * sizeof(long));
// This loop doesn't do any real work other than keep the CPU and memory busy
#pragma omp for
for(k=0; k<nThread; k++){
for(i=0; i<nLoop; i++){
if(tid == 0 && i%10 == 0){
printf("\ttid: %ld; i=%ld\n",tid,i);
}
fflush(stdout);
for(j=0; j<n; j++){
array[j] = tid;
}
}
}
free(array);   // release this thread's private buffer
}
return 0;
}
|
transpose.h | // This code is part of the Problem Based Benchmark Suite (PBBS)
// Copyright (c) 2011-2016 Guy Blelloch and the PBBS team
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights (to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#pragma once
#include "utilities.h"
#include "get_time.h"
namespace pbbs {
constexpr const size_t TRANS_THRESHHOLD = PAR_GRANULARITY/4;
inline size_t split(size_t n) {
return n/2;
//return ((((size_t) 1) << log2_up(n) != n) ? n/2 : (7*(n+1))/16);
}
template <class E>
struct transpose {
E *A, *B;
transpose(E *AA, E *BB) : A(AA), B(BB) {}
void transR(size_t rStart, size_t rCount, size_t rLength,
size_t cStart, size_t cCount, size_t cLength) {
if (cCount*rCount < TRANS_THRESHHOLD) {
for (size_t i=rStart; i < rStart+ rCount; i++)
for (size_t j=cStart; j < cStart + cCount; j++)
B[j*cLength + i] = A[i*rLength + j];
} else if (cCount > rCount) {
size_t l1 = split(cCount);
size_t l2 = cCount - l1;
auto left = [&] () {
transR(rStart,rCount,rLength,cStart,l1,cLength);};
auto right = [&] () {
transR(rStart,rCount,rLength,cStart + l1,l2,cLength);};
par_do(left, right);
} else {
      size_t l1 = split(rCount);  // split the row dimension; splitting cCount here can recurse forever when cCount is tiny
      size_t l2 = rCount - l1;
auto left = [&] () {
transR(rStart,l1,rLength,cStart,cCount,cLength);};
auto right = [&] () {
transR(rStart + l1,l2,rLength,cStart,cCount,cLength);};
par_do(left, right);
}
}
void trans(size_t rCount, size_t cCount) {
#if defined(OPENMP)
#pragma omp parallel
#pragma omp single
#endif
transR(0,rCount,cCount,0,cCount,rCount);
}
};
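// transR above is a cache-oblivious transpose: it recursively halves the
// larger dimension until a tile falls below TRANS_THRESHHOLD, then copies
// that tile with a direct double loop, so tiles eventually fit in cache at
// every level of the memory hierarchy without machine-specific tuning.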
template <class E, class int_t>
struct blockTrans {
E *A, *B;
int_t *OA, *OB;
blockTrans(E *AA, E *BB, int_t *OOA, int_t *OOB)
: A(AA), B(BB), OA(OOA), OB(OOB) {}
void transR(size_t rStart, size_t rCount, size_t rLength,
size_t cStart, size_t cCount, size_t cLength) {
if (cCount*rCount < TRANS_THRESHHOLD*16) {
parallel_for(rStart, rStart+rCount, [&] (size_t i) {
for (size_t j=cStart; j < cStart + cCount; j++) {
size_t sa = OA[i*rLength + j];
size_t sb = OB[j*cLength + i];
size_t l = OA[i*rLength + j + 1] - sa;
for (size_t k =0; k < l; k++)
copy_memory(B[k+sb], A[k+sa]);
}
});
} else if (cCount > rCount) {
size_t l1 = split(cCount);
size_t l2 = cCount - l1;
auto left = [&] () {
transR(rStart,rCount,rLength,cStart,l1,cLength);};
auto right = [&] () {
transR(rStart,rCount,rLength,cStart + l1,l2,cLength);};
par_do(left, right);
} else {
      size_t l1 = split(rCount);  // split the row dimension, as in transpose::transR above
      size_t l2 = rCount - l1;
auto left = [&] () {
transR(rStart,l1,rLength,cStart,cCount,cLength);};
auto right = [&] () {
transR(rStart + l1,l2,rLength,cStart,cCount,cLength);};
par_do(left, right);
}
}
void trans(size_t rCount, size_t cCount) {
#if defined(OPENMP)
#pragma omp parallel
#pragma omp single
#endif
transR(0,rCount,cCount,0,cCount,rCount);
}
} ;
// Moves values from blocks to buckets
// From is sorted by key within each block, in block major
// counts is the # of keys in each bucket for each block, in block major
// From and To are of length n
// counts is of length num_blocks * num_buckets
// Data is memcpy'd into To avoiding initializers and overloaded =
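// Worked example (hypothetical 2 blocks x 2 buckets, n = 7):
//   counts, block major: block0 = [2,1], block1 = [1,3]
//   reordered bucket major and exclusive-scanned: [2,1,1,3] -> [0,2,3,4]
//   so bucket 0 fills To[0..2) from block 0 and To[2..3) from block 1,
//   and bucket 1 fills To[3..4) from block 0 and To[4..7) from block 1.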
template<typename E, typename s_size_t>
sequence<size_t> transpose_buckets(E* From, E* To,
sequence<s_size_t> & counts,  // not const: mutated below by scan_inplace and counts[m]=n
size_t n,
size_t block_size,
size_t num_blocks,
size_t num_buckets) {
timer t("transpose", false);
size_t m = num_buckets * num_blocks;
sequence<s_size_t> dest_offsets;
auto add = addm<s_size_t>();
// for smaller input do non-cache oblivious version
if (n < (1 << 22) || num_buckets <= 512 || num_blocks <= 512) {
size_t block_bits = log2_up(num_blocks);
size_t block_mask = num_blocks-1;
if ((size_t) 1 << block_bits != num_blocks)
throw std::invalid_argument("in transpose_buckets: num_blocks must be a power of 2");
// determine the destination offsets
auto get = [&] (size_t i) {
return counts[(i>>block_bits) + num_buckets*(i&block_mask)];};
// slow down?
dest_offsets = sequence<s_size_t>(m, get);
size_t sum = scan_inplace(dest_offsets.slice(), add);
if (sum != n)
throw std::logic_error("in transpose, internal bad count");
t.next("seq and scan");
// send each key to correct location within its bucket
auto f = [&] (size_t i) {
size_t s_offset = i * block_size;
for (size_t j= 0; j < num_buckets; j++) {
size_t d_offset = dest_offsets[i+ num_blocks*j];
size_t len = counts[i*num_buckets+j];
for (size_t k =0; k < len; k++)
copy_memory(To[d_offset++], From[s_offset++]);
}
};
parallel_for(0, num_blocks, f, 1);
t.next("trans");
} else { // for larger input do cache efficient transpose
//sequence<s_size_t> source_offsets(counts,m+1);
dest_offsets = sequence<s_size_t>(m);
transpose<s_size_t>(counts.begin(), dest_offsets.begin()).trans(num_blocks,
num_buckets);
t.next("trans 1");
// do both scans inplace
size_t total = scan_inplace(dest_offsets.slice(), add);
size_t total2 = scan_inplace(counts.slice(), add);
if (total != n || total2 != n)
throw std::logic_error("in transpose, internal bad count");
counts[m] = n;
t.next("scans");
blockTrans<E,s_size_t>(From, To, counts.begin(),
dest_offsets.begin()).trans(num_blocks, num_buckets);
t.next("trans 2");
}
// return the bucket offsets, padded with n at the end
return sequence<size_t>(num_buckets+1, [&] (size_t i) {
return (i==num_buckets) ? n : dest_offsets[i*num_blocks];});
}
}
|
fss_cprg.c | #include "fss_cprg.h"
#include "floram_util.h"
#include "ackutil.h"
#include <omp.h>
struct fss_cprg_offline {
size_t size;
size_t blockmultiple;
size_t startlevel;
size_t thislevel;
size_t endlevel;
size_t thislevelblocks;
size_t nextlevelblocks;
void * Z;
bool * advicebits_l;
bool * advicebits_r;
uint8_t * level_data;
uint64_t * lda;
uint64_t * ldb;
uint8_t * lda2;
uint8_t * ldb2;
bool * level_bits;
bool * lba;
bool * lbb;
void * keyL;
void * keyR;
};
typedef struct block_t {
uint64_t data[BLOCKSIZE/sizeof(uint64_t)];
} block_t;
void block_xor(block_t * a, block_t * b) {
#pragma omp simd
for (uint8_t ii = 0; ii < BLOCKSIZE/sizeof(uint64_t); ii++) {
a->data[ii] ^= b->data[ii];
}
}
#pragma omp declare reduction(^: block_t: block_xor(&omp_out, &omp_in)) initializer (omp_priv = { 0 })
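/*
  The declare-reduction above makes "reduction(^: ...)" legal on whole block_t
  values: each thread starts from an all-zero private block (omp_priv = { 0 })
  and the per-thread partial blocks are folded together with block_xor when
  the parallel region finishes.
*/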
void fss_cprg_offline_start(uint8_t * local_output, bool * local_bit_output, uint64_t * accumulator_L, uint64_t * accumulator_R, fss_cprg_offline * fsso) {
fsso->thislevel = 0;
fsso->thislevelblocks = 1;
fsso->nextlevelblocks = 2;
#ifdef ORAM_PROFILE_SCHEDULING
printf("START FSS CPRG OFFLINE LEVEL %d %lld\n", fsso->thislevel,current_timestamp());
#endif
fsso->lda = (uint64_t *)fsso->level_data;
fsso->lda2 = (uint8_t *)fsso->level_data;
fsso->ldb = (uint64_t *)local_output;
fsso->ldb2 = (uint8_t *)local_output;
fsso->lba = fsso->level_bits;
fsso->lbb = local_bit_output;
memset(accumulator_L, 0, BLOCKSIZE);
memset(accumulator_R, 0, BLOCKSIZE);
get_random_bytes(fsso->lda2, BLOCKSIZE);
if (ocCurrentParty() == 1) fsso->lda2[0] &= 0xFE;
else fsso->lda2[0] |= 1;
uint64_t * ldb = fsso->ldb;  /* local alias for the simd-aligned loop below */
offline_prg(&fsso->ldb2[0], fsso->lda2, fsso->keyL);
offline_prg(&fsso->ldb2[BLOCKSIZE], fsso->lda2, fsso->keyR);
#pragma omp simd aligned(ldb, accumulator_L, accumulator_R:16)
for (size_t jj = 0; jj < BLOCKSIZE/sizeof(uint64_t); jj++) {
accumulator_L[jj] ^= ldb[jj];
accumulator_R[jj] ^= ldb[BLOCKSIZE/sizeof(uint64_t)+jj];
}
fsso->lba[0] = fsso->lda2[0] & 1;
if (fsso->thislevel == fsso->endlevel) {
memcpy(fsso->ldb, fsso->lda, BLOCKSIZE*fsso->blockmultiple);
memcpy(accumulator_L, fsso->lda, BLOCKSIZE*fsso->blockmultiple);
memcpy(fsso->lbb, fsso->lba, sizeof(bool));
}
#ifdef ORAM_PROFILE_SCHEDULING
printf("END FSS CPRG OFFLINE LEVEL %d %lld\n", fsso->thislevel,current_timestamp());
#endif
}
void fss_cprg_offline_process_round(uint8_t * accumulator_L, uint8_t * accumulator_R, uint8_t * z, bool advicebit_l, bool advicebit_r, fss_cprg_offline * fsso) {
fsso->thislevel += 1;
fsso->thislevelblocks = fsso->nextlevelblocks;
fsso->nextlevelblocks = (fsso->size + (1ll<<(fsso->endlevel - fsso->thislevel -1)) - 1) / (1ll<<(fsso->endlevel - fsso->thislevel -1));
if (fsso->thislevel == fsso->endlevel -1) fsso->nextlevelblocks = fsso->size;
#ifdef ORAM_PROFILE_SCHEDULING
printf("START FSS CPRG OFFLINE LEVEL %d %lld\n", fsso->thislevel,current_timestamp());
#endif
uint64_t* t; uint8_t* t2; bool * tb;
size_t expansion_stride;
t2 = fsso->ldb2; t = fsso->ldb; tb = fsso->lbb;
fsso->ldb2 = fsso->lda2; fsso->ldb = fsso->lda; fsso->lbb = fsso->lba;
fsso->lda2 = t2; fsso->lda = t; fsso->lba = tb;
uint64_t * lda = fsso->lda;
uint64_t * ldb = fsso->ldb;
block_t accL = {0};
block_t accR = {0};
if (fsso->thislevel == fsso->endlevel - 1 && (fsso->thislevel % 2) == 0) {
expansion_stride = (BLOCKSIZE * fsso->blockmultiple);
} else {
expansion_stride = BLOCKSIZE;
}
floram_set_procs_for_data_size(BLOCKSIZE * (fsso->nextlevelblocks + fsso->thislevelblocks));
#pragma omp parallel for reduction(^:accL,accR) schedule(guided)
for (size_t ii = 0; ii < 4*(fsso->nextlevelblocks/8); ii+=4) {
fsso->lba[ii] = (fsso->lda2[ii*BLOCKSIZE] & 1) ^ (fsso->lbb[ii/2] & advicebit_l);
fsso->lba[ii+1] = (fsso->lda2[(ii+1)*BLOCKSIZE] & 1) ^ (fsso->lbb[ii/2] & advicebit_r);
fsso->lba[ii+2] = (fsso->lda2[(ii+2)*BLOCKSIZE] & 1) ^ (fsso->lbb[(ii+2)/2] & advicebit_l);
fsso->lba[ii+3] = (fsso->lda2[(ii+3)*BLOCKSIZE] & 1) ^ (fsso->lbb[(ii+2)/2] & advicebit_r);
if (fsso->lbb[ii/2]) {
#pragma omp simd aligned(lda,z:16)
for (uint8_t jj = 0; jj < BLOCKSIZE/sizeof(uint64_t); jj++) {
lda[ii*(BLOCKSIZE/sizeof(uint64_t))+jj] ^= ((uint64_t *)z)[jj];
lda[(ii+1)*(BLOCKSIZE/sizeof(uint64_t))+jj] ^= ((uint64_t *)z)[jj];
}
}
if (fsso->lbb[(ii+2)/2]) {
#pragma omp simd aligned(lda,z:16)
for (uint8_t jj = 0; jj < BLOCKSIZE/sizeof(uint64_t); jj++) {
lda[(ii+2)*(BLOCKSIZE/sizeof(uint64_t))+jj] ^= ((uint64_t *)z)[jj];
lda[(ii+3)*(BLOCKSIZE/sizeof(uint64_t))+jj] ^= ((uint64_t *)z)[jj];
}
}
offline_prg_oct(&fsso->ldb2[ii*2*expansion_stride], &fsso->ldb2[(ii*2+1)*expansion_stride], &fsso->ldb2[(ii*2+2)*expansion_stride], &fsso->ldb2[(ii*2+3)*expansion_stride],
&fsso->ldb2[(ii*2+4)*expansion_stride], &fsso->ldb2[(ii*2+5)*expansion_stride], &fsso->ldb2[(ii*2+6)*expansion_stride], &fsso->ldb2[(ii*2+7)*expansion_stride],
&fsso->lda2[ii*BLOCKSIZE], &fsso->lda2[ii*BLOCKSIZE], &fsso->lda2[(ii+1)*BLOCKSIZE], &fsso->lda2[(ii+1)*BLOCKSIZE],
&fsso->lda2[(ii+2)*BLOCKSIZE], &fsso->lda2[(ii+2)*BLOCKSIZE], &fsso->lda2[(ii+3)*BLOCKSIZE], &fsso->lda2[(ii+3)*BLOCKSIZE],
fsso->keyL, fsso->keyR, fsso->keyL, fsso->keyR,
fsso->keyL, fsso->keyR, fsso->keyL, fsso->keyR);
#pragma omp simd aligned(ldb:16)
for (size_t jj = 0; jj < BLOCKSIZE/sizeof(uint64_t); jj++) {
accL.data[jj] ^= ldb[ii*2*expansion_stride/sizeof(uint64_t)+jj] ^ ldb[(ii*2+2)*expansion_stride/sizeof(uint64_t)+jj]
^ ldb[(ii*2+4)*expansion_stride/sizeof(uint64_t)+jj] ^ ldb[(ii*2+6)*expansion_stride/sizeof(uint64_t)+jj];
accR.data[jj] ^= ldb[(ii*2+1)*expansion_stride/sizeof(uint64_t)+jj] ^ ldb[(ii*2+3)*expansion_stride/sizeof(uint64_t)+jj]
^ ldb[(ii*2+5)*expansion_stride/sizeof(uint64_t)+jj] ^ ldb[(ii*2+7)*expansion_stride/sizeof(uint64_t)+jj];
}
}
for (size_t ii = 4*(fsso->nextlevelblocks/8); ii < fsso->thislevelblocks ; ii++) {
if (ii%2 == 0) {
fsso->lba[ii] = (fsso->lda2[ii*BLOCKSIZE] & 1) ^ (fsso->lbb[ii/2] & advicebit_l);
} else {
fsso->lba[ii] = (fsso->lda2[ii*BLOCKSIZE] & 1) ^ (fsso->lbb[ii/2] & advicebit_r);
}
if (fsso->lbb[ii/2]) {
#pragma omp simd aligned(lda,z:16)
for (uint8_t jj = 0; jj < BLOCKSIZE/sizeof(uint64_t); jj++) {
lda[ii*(BLOCKSIZE/sizeof(uint64_t))+jj] ^= ((uint64_t *)z)[jj];
}
}
if ((ii+1)*2 <= fsso->nextlevelblocks) {
offline_prg(&fsso->ldb2[ii*2*expansion_stride], &fsso->lda2[ii*BLOCKSIZE], fsso->keyL);
offline_prg(&fsso->ldb2[(ii*2+1)*expansion_stride], &fsso->lda2[ii*BLOCKSIZE], fsso->keyR);
#pragma omp simd aligned(ldb:16)
for (size_t jj = 0; jj < BLOCKSIZE/sizeof(uint64_t); jj++) {
accL.data[jj] ^= ldb[ii*2*expansion_stride/sizeof(uint64_t)+jj];
accR.data[jj] ^= ldb[(ii*2+1)*expansion_stride/sizeof(uint64_t)+jj];
}
} else if (ii*2+1 <= fsso->nextlevelblocks) {
offline_prg(&fsso->ldb2[ii*2*expansion_stride], &fsso->lda2[ii*BLOCKSIZE], fsso->keyL);
#pragma omp simd aligned(ldb:16)
for (size_t jj = 0; jj < BLOCKSIZE/sizeof(uint64_t); jj++) {
accL.data[jj] ^= ldb[ii*2*expansion_stride/sizeof(uint64_t)+jj];
}
}
}
for (size_t jj = 0; jj < BLOCKSIZE/sizeof(uint64_t); jj++) {
((uint64_t *)accumulator_L)[jj] = accL.data[jj];
((uint64_t *)accumulator_R)[jj] = accR.data[jj];
}
#ifdef ORAM_PROFILE_SCHEDULING
printf("END FSS CPRG OFFLINE LEVEL %d %lld\n", fsso->thislevel,current_timestamp());
#endif
}
void fss_cprg_offline_finalize(uint8_t * accumulator, uint8_t * z, bool advicebit_l, bool advicebit_r, fss_cprg_offline * fsso) {
fsso->thislevel += 1;
fsso->thislevelblocks = fsso->nextlevelblocks;
#ifdef ORAM_PROFILE_SCHEDULING
printf("START FSS CPRG OFFLINE LEVEL %d %lld\n", fsso->thislevel,current_timestamp());
#endif
uint64_t* t; uint8_t* t2; bool * tb;
t2 = fsso->ldb2; t = fsso->ldb; tb = fsso->lbb;
fsso->ldb2 = fsso->lda2; fsso->ldb = fsso->lda; fsso->lbb = fsso->lba;
fsso->lda2 = t2; fsso->lda = t; fsso->lba = tb;
uint64_t * lda = fsso->lda;
uint64_t * ldb = fsso->ldb;
uint8_t * local_output;
block_t acc = {0};
if (fsso->thislevel%2==0) {
local_output = fsso->ldb2;
floram_set_procs_for_data_size(BLOCKSIZE * fsso->thislevelblocks * 2);
#pragma omp parallel for reduction(^:acc) schedule(guided)
for (size_t ii = 0; ii < fsso->thislevelblocks; ii++) {
if (ii%2 == 0) {
fsso->lba[ii] = (fsso->lda2[ii*BLOCKSIZE] & 1) ^ (fsso->lbb[ii/2] & advicebit_l);
} else {
fsso->lba[ii] = (fsso->lda2[ii*BLOCKSIZE] & 1) ^ (fsso->lbb[ii/2] & advicebit_r);
}
if (fsso->lbb[ii/2]) {
#pragma omp simd aligned(ldb,lda,z:16)
for (uint8_t jj = 0; jj < BLOCKSIZE/sizeof(uint64_t); jj++) {
ldb[ii*((BLOCKSIZE*fsso->blockmultiple)/sizeof(uint64_t))+jj] = lda[ii*(BLOCKSIZE/sizeof(uint64_t))+jj] ^ ((uint64_t *)z)[jj];
acc.data[jj] ^= ldb[ii*((BLOCKSIZE*fsso->blockmultiple)/sizeof(uint64_t))+jj];
}
} else {
memcpy(&fsso->ldb[ii*((BLOCKSIZE*fsso->blockmultiple)/sizeof(uint64_t))], &fsso->lda[ii*(BLOCKSIZE/sizeof(uint64_t))], BLOCKSIZE);
#pragma omp simd aligned(ldb:16)
for (uint8_t jj = 0; jj < BLOCKSIZE/sizeof(uint64_t); jj++) {
acc.data[jj] ^= ldb[ii*((BLOCKSIZE*fsso->blockmultiple)/sizeof(uint64_t))+jj];
}
}
}
memcpy(fsso->lbb, fsso->lba, fsso->thislevelblocks*sizeof(bool));
} else {
local_output = fsso->lda2;
floram_set_procs_for_data_size(BLOCKSIZE * fsso->thislevelblocks);
#pragma omp parallel for reduction(^:acc) schedule(guided)
for (size_t ii = 0; ii < fsso->thislevelblocks; ii++) {
if (ii%2 == 0) {
fsso->lba[ii] = (fsso->lda2[ii*(BLOCKSIZE*fsso->blockmultiple)] & 1) ^ (fsso->lbb[ii/2] & advicebit_l);
} else {
fsso->lba[ii] = (fsso->lda2[ii*(BLOCKSIZE*fsso->blockmultiple)] & 1) ^ (fsso->lbb[ii/2] & advicebit_r);
}
if (fsso->lbb[ii/2]) {
#pragma omp simd aligned(lda,z:16)
for (uint8_t jj = 0; jj < BLOCKSIZE/sizeof(uint64_t); jj++) {
lda[ii*((BLOCKSIZE*fsso->blockmultiple)/sizeof(uint64_t))+jj] ^= ((uint64_t *)z)[jj];
acc.data[jj] ^= lda[ii*((BLOCKSIZE*fsso->blockmultiple)/sizeof(uint64_t))+jj];
}
} else {
#pragma omp simd aligned(lda:16)
for (uint8_t jj = 0; jj < BLOCKSIZE/sizeof(uint64_t); jj++) {
acc.data[jj] ^= lda[ii*((BLOCKSIZE*fsso->blockmultiple)/sizeof(uint64_t))+jj];
}
}
}
}
for (size_t jj = 0; jj < BLOCKSIZE/sizeof(uint64_t); jj++) {
((uint64_t *)accumulator)[jj] = acc.data[jj];
}
for (size_t jj = 1; jj < fsso->blockmultiple; jj++) {
for (size_t ii = 0; ii < BLOCKSIZE/sizeof(uint64_t); ii++) acc.data[ii] = 0;
#pragma omp parallel for reduction(^:acc) schedule(guided)
for (size_t ii = 0; ii < 8*(fsso->thislevelblocks/8); ii+=8) {
offline_prg_oct(
&local_output[(ii+0) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)],
&local_output[(ii+1) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)],
&local_output[(ii+2) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)],
&local_output[(ii+3) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)],
&local_output[(ii+4) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)],
&local_output[(ii+5) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)],
&local_output[(ii+6) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)],
&local_output[(ii+7) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)],
&local_output[(ii+0) * (BLOCKSIZE*fsso->blockmultiple) + ((jj-1) * BLOCKSIZE)],
&local_output[(ii+1) * (BLOCKSIZE*fsso->blockmultiple) + ((jj-1) * BLOCKSIZE)],
&local_output[(ii+2) * (BLOCKSIZE*fsso->blockmultiple) + ((jj-1) * BLOCKSIZE)],
&local_output[(ii+3) * (BLOCKSIZE*fsso->blockmultiple) + ((jj-1) * BLOCKSIZE)],
&local_output[(ii+4) * (BLOCKSIZE*fsso->blockmultiple) + ((jj-1) * BLOCKSIZE)],
&local_output[(ii+5) * (BLOCKSIZE*fsso->blockmultiple) + ((jj-1) * BLOCKSIZE)],
&local_output[(ii+6) * (BLOCKSIZE*fsso->blockmultiple) + ((jj-1) * BLOCKSIZE)],
&local_output[(ii+7) * (BLOCKSIZE*fsso->blockmultiple) + ((jj-1) * BLOCKSIZE)],
fsso->keyL,
fsso->keyL,
fsso->keyL,
fsso->keyL,
fsso->keyL,
fsso->keyL,
fsso->keyL,
fsso->keyL
);
#pragma omp simd aligned(local_output:16)
for (size_t kk = 0; kk < BLOCKSIZE/sizeof(uint64_t); kk++) {
acc.data[kk] ^= ((uint64_t *)(&local_output[(ii+0) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)]))[kk]
^ ((uint64_t *)(&local_output[(ii+1) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)]))[kk]
^ ((uint64_t *)(&local_output[(ii+2) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)]))[kk]
^ ((uint64_t *)(&local_output[(ii+3) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)]))[kk]
^ ((uint64_t *)(&local_output[(ii+4) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)]))[kk]
^ ((uint64_t *)(&local_output[(ii+5) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)]))[kk]
^ ((uint64_t *)(&local_output[(ii+6) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)]))[kk]
^ ((uint64_t *)(&local_output[(ii+7) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)]))[kk];
}
}
for (size_t ii = 8*(fsso->thislevelblocks/8); ii < fsso->thislevelblocks ; ii++) {
offline_prg(
&local_output[(ii) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)],
&local_output[(ii) * (BLOCKSIZE*fsso->blockmultiple) + ((jj-1) * BLOCKSIZE)],
fsso->keyL
);
for (size_t kk = 0; kk < BLOCKSIZE/sizeof(uint64_t); kk++) {
acc.data[kk] ^= ((uint64_t *)(&local_output[(ii) * (BLOCKSIZE*fsso->blockmultiple) + (jj * BLOCKSIZE)]))[kk];
}
}
for (size_t ii = 0; ii < BLOCKSIZE/sizeof(uint64_t); ii++) {
((uint64_t *)accumulator)[jj*BLOCKSIZE/sizeof(uint64_t)+ii] = acc.data[ii];
}
}
#ifdef ORAM_PROFILE_SCHEDULING
printf("END FSS CPRG OFFLINE LEVEL %d %lld\n", fsso->thislevel,current_timestamp());
#endif
}
void fss_cprg_offline_parallelizer(void* fss, void* indexp, void *blockdelta, void * local_output, void * local_bit_output, void* pd, fss_cprg_traverser_fn fn, facb_fn cbfn, void* cbpass) {
omp_set_nested(true);
#pragma omp parallel num_threads(2)
{
//OpenMP seems to get along with obliv-c just fine, so long as obliv-c only uses the master thread.
#pragma omp master
{
fn(blockdelta, local_output, local_bit_output, fss, indexp);
}
if (cbfn != NULL) {
#pragma omp single
{
#pragma omp task
{
if (omp_get_num_threads() > 1) cbfn(cbpass, pd);
else cbfn(cbpass, NULL);
}
}
}
}
}
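/*
  Concurrency pattern above: the master thread runs the traversal fn (the
  author notes obliv-c must stay on the master thread) while a separate
  OpenMP task invokes the callback, handing it pd only when more than one
  thread is actually available and NULL otherwise.
*/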
fss_cprg_offline * fss_cprg_offline_new(size_t size, size_t blockmultiple, uint8_t * keyL, uint8_t * keyR) {
fss_cprg_offline * fsso = malloc(sizeof(fss_cprg_offline));
fsso->size = size;
fsso->blockmultiple = blockmultiple;
fsso->startlevel = 0;
fsso->endlevel = LOG2LL(size) + (((1ll << LOG2LL(size)) < size)? 1:0);  /* 1ll avoids 32-bit shift overflow */
posix_memalign((void **)&fsso->level_data,16,(1ll<<fsso->endlevel) * BLOCKSIZE);
posix_memalign(&fsso->Z,16,(fsso->endlevel - fsso->startlevel) * BLOCKSIZE);
fsso->advicebits_l = malloc((fsso->endlevel - fsso->startlevel) * sizeof(bool));
fsso->advicebits_r = malloc((fsso->endlevel - fsso->startlevel) * sizeof(bool));
fsso->level_bits = malloc(size * sizeof(bool));
offline_prg_init();
fsso->keyL = offline_prg_keyschedule(keyL);
fsso->keyR = offline_prg_keyschedule(keyR);
return fsso;
}
void fss_cprg_offline_free(fss_cprg_offline * fsso) {
free(fsso->level_data);
free(fsso->level_bits);
free(fsso->advicebits_l);
free(fsso->advicebits_r);
free(fsso->Z);
free(fsso->keyL);
free(fsso->keyR);
free(fsso);
} |
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
struct OMPTraitProperty;
struct OMPTraitSelector;
struct OMPTraitSet;
class OMPTraitInfo;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
/// PrevTokLocation - The location of the token we previously
/// consumed. This token is used for diagnostics where we expected to
/// see a token following another token (e.g., the ';' at the end of
/// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances.
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfo for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> FloatControlHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;
std::unique_ptr<PragmaHandler> PUBLIC_DOMAIN_TECHNOLOGYHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp; it is a workaround
/// to make sure ProduceSignatureHelp is only called at the deepest function
/// call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
void operator++() {
++Depth;
++AddedLevels;
}
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
void setAddedDepth(unsigned D) {
Depth = Depth - AddedLevels + D;
AddedLevels = D;
}
unsigned getDepth() const { return Depth; }
unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};
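// A minimal usage sketch (illustrative, not a verbatim excerpt from the
// parser): track the depth while template parameter scopes are entered.
//
//    TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
//    ++CurTemplateDepthTracker; // entered one template parameter scope
//    // the original depth is restored when the tracker is destroyed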
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
void MaybeDestroyTemplateIds() {
if (!TemplateIds.empty() &&
(Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens()))
DestroyTemplateIds();
}
void DestroyTemplateIds();
/// RAII object to destroy TemplateIdAnnotations where possible, from a
/// likely-good position during parsing.
struct DestroyTemplateIdAnnotationsRAIIObj {
Parser &Self;
DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {}
~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); }
};
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
/// Flags used to rank candidate template names when there is more than one
/// '<' in a scope.
enum Priority : unsigned short {
/// A non-dependent name that is a potential typo for a template name.
PotentialTypo = 0x0,
/// A dependent name that might instantiate to a template-name.
DependentName = 0x2,
/// A space appears before the '<' token.
SpaceBeforeLess = 0x0,
/// No space appears before the '<' token.
NoSpaceBeforeLess = 0x1,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
};
struct Loc {
Expr *TemplateName;
SourceLocation LessLoc;
AngleBracketTracker::Priority Priority;
unsigned short ParenCount, BracketCount, BraceCount;
bool isActive(Parser &P) const {
return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
P.BraceCount == BraceCount;
}
bool isActiveOrNested(Parser &P) const {
return isActive(P) || P.ParenCount > ParenCount ||
P.BracketCount > BracketCount || P.BraceCount > BraceCount;
}
};
SmallVector<Loc, 8> Locs;
/// Add an expression that might have been intended to be a template name.
/// In the case of ambiguity, we arbitrarily select the innermost such
/// expression, for example in 'foo < bar < baz', 'bar' is the current
/// candidate. No attempt is made to track that 'foo' is also a candidate
/// for the case where we see a second suspicious '>' token.
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
Priority Prio) {
if (!Locs.empty() && Locs.back().isActive(P)) {
if (Locs.back().Priority <= Prio) {
Locs.back().TemplateName = TemplateName;
Locs.back().LessLoc = LessLoc;
Locs.back().Priority = Prio;
}
} else {
Locs.push_back({TemplateName, LessLoc, Prio,
P.ParenCount, P.BracketCount, P.BraceCount});
}
}
/// Mark the current potential missing template location as having been
/// handled (this happens if we pass a "corresponding" '>' or '>>' token
/// or leave a bracket scope).
void clear(Parser &P) {
while (!Locs.empty() && Locs.back().isActiveOrNested(P))
Locs.pop_back();
}
/// Get the current enclosing expression that might have been intended to be
/// a template name.
Loc *getCurrent(Parser &P) {
if (!Locs.empty() && Locs.back().isActive(P))
return &Locs.back();
return nullptr;
}
};
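// Hedged sketch of how the tracker member below is driven (see
// checkPotentialAngleBracket and checkPotentialAngleBracketDelimiter):
//
//    AngleBrackets.add(*this, TemplateName, Tok.getLocation(),
//                      AngleBracketTracker::PotentialTypo);
//    ...
//    if (auto *Info = AngleBrackets.getCurrent(*this))
//      ... // a '>' here may close a mistyped template-id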
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
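// Illustrative sketch (not from the original header) of the consume helpers
// above:
//
//    SourceLocation CommaLoc;
//    if (TryConsumeToken(tok::comma, CommaLoc)) {
//      // ',' was consumed; CommaLoc holds its location, Tok is the next token.
//    }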
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or a typo for '='.
/// For typos, give a fixit to '='.
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
Token Next = Tok;
PP.EnterToken(Consumed, /*IsReinject*/true);
PP.Lex(Tok);
PP.EnterToken(Next, /*IsReinject*/true);
}
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount) {
AngleBrackets.clear(*this);
--ParenCount; // Don't let unbalanced )'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount) {
AngleBrackets.clear(*this);
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount) {
AngleBrackets.clear(*this);
--BraceCount; // Don't let unbalanced }'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched a specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// Handle the annotation token produced for
/// #pragma float_control
void HandlePragmaFloatControl();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
if (N == 0 || Tok.is(tok::eof)) return Tok;
return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static TypeResult getTypeAnnotation(const Token &Tok) {
if (!Tok.getAnnotationValue())
return TypeError();
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
static void setTypeAnnotation(Token &Tok, TypeResult T) {
assert((T.isInvalid() || T.get()) &&
"produced a valid-but-null type annotation?");
Tok.setAnnotationValue(T.isInvalid() ? nullptr : T.get().getAsOpaquePtr());
}
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}
static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
Tok.setAnnotationValue(ND);
}
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}
static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
Tok.setAnnotationValue(ND);
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
bool MightBeCXXScopeToken() {
return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
(Tok.is(tok::annot_template_id) &&
NextToken().is(tok::coloncolon)) ||
Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super);
}
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
}
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC);
if (Tok.isAnnotation())
return false;
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
PreferredTypeBuilder PrevPreferredType;
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
bool isActive;
public:
explicit TentativeParsingAction(Parser& p) : P(p) {
PrevPreferredType = P.PreferredType;
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
void Commit() {
assert(isActive && "Parsing action was finished!");
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
void Revert() {
assert(isActive && "Parsing action was finished!");
P.PP.Backtrack();
P.PreferredType = PrevPreferredType;
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
~RevertingTentativeParsingAction() { Revert(); }
};
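// Hedged usage sketch: a disambiguation parse that is always undone.
//
//    {
//      RevertingTentativeParsingAction PA(*this);
//      ConsumeToken();
//      // ... speculative lookahead ...
//    } // Revert() runs here, restoring the token stream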
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
Decl *DC;
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
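// Hedged sketch: ExpectAndConsume returns true on failure, so callers
// typically bail out when it does, e.g.:
//
//    if (ExpectAndConsume(tok::l_paren))
//      return ExprError(); // diagnostic already emitted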
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
Parser *Self;
ParseScope(const ParseScope &) = delete;
void operator=(const ParseScope &) = delete;
public:
// ParseScope - Construct a new object to manage a scope in the
// parser Self where the new Scope is created with the flags
// ScopeFlags, but only when we aren't about to enter a compound statement.
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
bool BeforeCompoundStmt = false)
: Self(Self) {
if (EnteredScope && !BeforeCompoundStmt)
Self->EnterScope(ScopeFlags);
else {
if (BeforeCompoundStmt)
Self->incrementMSManglingNumber();
this->Self = nullptr;
}
}
// Exit - Exit the scope associated with this object now, rather
// than waiting until the object is destroyed.
void Exit() {
if (Self) {
Self->ExitScope();
Self = nullptr;
}
}
~ParseScope() {
Exit();
}
};
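// Illustrative sketch (the flag choice is hypothetical): introduce a scope
// for a body and let RAII, or an explicit Exit(), close it.
//
//    ParseScope BodyScope(this, Scope::DeclScope);
//    // ... parse declarations within the new scope ...
//    BodyScope.Exit(); // optional; the destructor would also exit it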
/// Introduces zero or more scopes for parsing. The scopes will all be exited
/// when the object is destroyed.
class MultiParseScope {
Parser &Self;
unsigned NumScopes = 0;
MultiParseScope(const MultiParseScope&) = delete;
public:
MultiParseScope(Parser &Self) : Self(Self) {}
void Enter(unsigned ScopeFlags) {
Self.EnterScope(ScopeFlags);
++NumScopes;
}
void Exit() {
while (NumScopes) {
Self.ExitScope();
--NumScopes;
}
}
~MultiParseScope() {
Exit();
}
};
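// Illustrative sketch: enter a variable number of scopes and pop them all.
//
//    MultiParseScope TemplateScopes(*this);
//    TemplateScopes.Enter(Scope::TemplateParamScope);
//    // ... possibly Enter() further scopes ...
//    TemplateScopes.Exit(); // exits everything entered above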
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
/// Re-enter the template scopes for a declaration that might be a template.
unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
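// Illustrative recovery sketch: skip to the ')' matching the current
// construct, stopping early at ';' and leaving the ')' unconsumed.
//
//    SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);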
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
/// The location of the first statement inside an else that might
/// have misleading indentation. If there is no
/// MisleadingIndentationChecker active on an else, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
virtual void ParseLexedPragmas();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
void ParseLexedPragmas() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
IdentifierInfo *MacroII = nullptr;
SourceLocation AttrNameLoc;
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
class LateParsedPragma : public LateParsedDeclaration {
Parser *Self = nullptr;
AccessSpecifier AS = AS_none;
CachedTokens Toks;
public:
explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
: Self(P), AS(AS) {}
void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
const CachedTokens &toks() const { return Toks; }
AccessSpecifier getAccessSpecifier() const { return AS; }
void ParseLexedPragmas() override;
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
bool parseSoon() { return ParseSoon; }
private:
bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
Decl *D;
CachedTokens Toks;
explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser* Self;
/// Method - The method declaration.
Decl *Method;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top-level
/// (non-nested) C++ class, method declarations that contain parts which
/// won't be parsed until after the definition is completed
/// (C++ [class.mem]p2), together with any attached inline definitions, are
/// stored here along with the tokens that will later be parsed to create
/// those entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), IsInterface(IsInterface),
TagOrTemplate(TagOrTemplate) {}
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
Parser &P;
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
if (!Popped)
P.PopParsingClass(State);
}
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// The kind of template we are parsing.
enum {
/// We are not parsing a template at all.
NonTemplate = 0,
/// We are parsing a template declaration.
Template,
/// We are parsing an explicit specialization.
ExplicitSpecialization,
/// We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// The location of the 'extern' keyword, if any, for an explicit
/// instantiation
SourceLocation ExternLoc;
/// The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
// In ParseCXXInlineMethods.cpp.
struct ReenterTemplateScopeRAII;
struct ReenterClassScopeRAII;
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
struct ParsedAttributesWithRange : ParsedAttributes {
ParsedAttributesWithRange(AttributeFactory &factory)
: ParsedAttributes(factory) {}
void clear() {
ParsedAttributes::clear();
Range = SourceRange();
}
SourceRange Range;
};
struct ParsedAttributesViewWithRange : ParsedAttributesView {
ParsedAttributesViewWithRange() : ParsedAttributesView() {}
void clearListOnly() {
ParsedAttributesView::clearListOnly();
Range = SourceRange();
}
SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
struct ObjCImplParsingDataRAII {
Parser &P;
Decl *Dcl;
bool HasCFunction;
typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
LateParsedObjCMethodContainer LateParsedObjCMethods;
ObjCImplParsingDataRAII(Parser &parser, Decl *D)
: P(parser), Dcl(D), HasCFunction(false) {
P.CurParsedObjCImpl = this;
Finished = false;
}
~ObjCImplParsingDataRAII();
void finish(SourceRange AtEnd);
bool isFinished() const { return Finished; }
private:
bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
// For #pragma public_domain_technology
using PUBLIC_DOMAIN_TECHNOLOGYClauseResult = ActionResult<PUBLIC_DOMAIN_TECHNOLOGYClause *>;
static PUBLIC_DOMAIN_TECHNOLOGYClauseResult ClauseError() {
return PUBLIC_DOMAIN_TECHNOLOGYClauseResult(true);
}
static PUBLIC_DOMAIN_TECHNOLOGYClauseResult ClauseError(const DiagnosticBuilder &) {
return ClauseError();
}
static PUBLIC_DOMAIN_TECHNOLOGYClauseResult ClauseEmpty() {
return PUBLIC_DOMAIN_TECHNOLOGYClauseResult(false);
}
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult
ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
/// Control what ParseCastExpression will parse.
enum CastParseKind {
AnyCastExpr = 0,
UnaryExprOnly,
PrimaryExprOnly
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
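// For illustration, each of these statements continues with one of the
// postfix-expression suffix tokens checked above:
//   a[i]; f(x); s.m; p->m; n++; n--;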
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
if (auto *Info = AngleBrackets.getCurrent(*this))
return checkPotentialAngleBracketDelimiter(*Info, OpToken);
return false;
}
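// Illustrative example of the '<' ambiguity these angle-bracket helpers
// resolve:
//   f<int>(0);   // '<' opens a template argument list if 'f' is a template
//   a < b > c;   // otherwise '<' and '>' are ordinary comparisons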
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseUniqueStableNameExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
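// Illustrative forms for each option (a sketch, not from the original
// header):
//   (1 + 2)               // SimpleExpr
//   (xs + ...)            // FoldExpr: C++17 fold-expression
//   ({ int i = 0; i; })   // CompoundStmt: GNU statement expression
//   (int[]){1, 2}         // CompoundLiteral: C99 compound literal
//   (int)x                // CastExpr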
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHasErrors,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false,
bool InUsingDeclaration = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
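// Illustrative Objective-C++ ambiguity: tokens beginning with '[' may open a
// lambda-introducer or a message send, e.g.:
//   auto f = [x] { return x; };   // lambda-introducer
//   [arr addObject:obj];          // Objective-C message send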
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C++ Concepts
ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
ExprResult ParseInitializer() {
if (Tok.isNot(tok::l_brace))
return ParseAssignmentExpression();
return ParseBraceInitializer();
}
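// For illustration, the two initializer forms this helper dispatches on:
//   int a = 42;     // assignment-expression
//   int b = {42};   // braced initializer, handled by ParseBraceInitializer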
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator(
llvm::function_ref<void(const Designation &)> CodeCompleteCB);
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only
/// size used).
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used).
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParsePUBLIC_DOMAIN_TECHNOLOGYStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
StmtResult ParsePUBLIC_DOMAIN_TECHNOLOGYStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
StmtResult ParsePUBLIC_DOMAIN_TECHNOLOGYStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
// For #pragma public_domain_technology
PUBLIC_DOMAIN_TECHNOLOGY::Kind
tryParsePragmaPUBLIC_DOMAIN_TECHNOLOGY(SourceLocation BeginLoc, ParsedStmtContext StmtCtx,
SmallVectorImpl<PUBLIC_DOMAIN_TECHNOLOGYClause *> &Clauses);
StmtResult ParsePragmaPUBLIC_DOMAIN_TECHNOLOGY(ParsedStmtContext StmtCtx);
PUBLIC_DOMAIN_TECHNOLOGYClauseResult ParsePUBLIC_DOMAIN_TECHNOLOGYClause(PUBLIC_DOMAIN_TECHNOLOGY::Kind PUBLIC_DOMAIN_TECHNOLOGYKind);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior this __if_exists or __if_not_exists block
/// should have.
IfExistsBehavior Behavior;
};
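// Illustrative Microsoft-mode usage ('Widget' is a hypothetical type):
//   __if_exists(Widget::draw) {
//     // compiled only if Widget::draw exists
//   }
//   __if_not_exists(Widget::draw) {
//     // compiled only if Widget::draw does not exist
//   }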
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers that
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
return false;
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return true;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Whether a defining-type-specifier is permitted in a given context.
enum class AllowDefiningTypeSpec {
/// The grammar doesn't allow a defining-type-specifier here, and we must
/// not parse one (eg, because a '{' could mean something else).
No,
/// The grammar doesn't allow a defining-type-specifier here, but we permit
/// one for error recovery purposes. Sema will reject.
NoButErrorRecovery,
/// The grammar allows a defining-type-specifier here, even though it's
/// always invalid. Sema will reject.
YesButInvalid,
/// The grammar allows a defining-type-specifier here, and one can be valid.
Yes
};
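// Illustrative sketch: a defining-type-specifier defines a class or enum
// inline in the specifier sequence, which only some contexts allow, e.g.:
//   struct S { int x; } s;      // fine in a normal declaration (Yes)
//   auto f() -> struct T {};    // ill-formed in a trailing return type (No)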
/// Is this a context in which we are parsing defining-type-specifiers (and
/// so permit class and enum definitions in addition to non-defining class and
/// enum elaborated-type-specifiers)?
static AllowDefiningTypeSpec
isDefiningTypeSpecifierContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_alias_declaration:
case DeclSpecContext::DSC_objc_method_result:
return AllowDefiningTypeSpec::Yes;
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_template_param:
return AllowDefiningTypeSpec::YesButInvalid;
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
return AllowDefiningTypeSpec::NoButErrorRecovery;
case DeclSpecContext::DSC_trailing:
return AllowDefiningTypeSpec::No;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which an opaque-enum-declaration can appear?
static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
return true;
case DeclSpecContext::DSC_alias_declaration:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_type_specifier:
return true;
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
SourceLocation ColonLoc;
ExprResult RangeExpr;
bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
struct ForRangeInfo : ForRangeInit {
StmtResult LoopVar;
};
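// Illustrative sketch: what starts as an ordinary declaration turns out to be
// a for-range-declaration once the ':' is seen, e.g.:
//   for (int v : vec) { }   // ColonLoc and RangeExpr populate ForRangeInit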
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
RecordDecl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and that it isn't part of an expression such as a
/// function-style cast. Return false if it's not a decl-specifier, or if
/// we're not sure.
bool isKnownToBeDeclarationSpecifier() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationSpecifier() == TPResult::True;
return isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration and an
/// expression statement when parsing function bodies.
/// Returns true for a declaration, false for an expression.
bool isDeclarationStatement() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationStatement();
return isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration and an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for a declaration, false for an expression.
bool isForInitDeclaration() {
if (getLangOpts().OpenMP)
Actions.startOpenMPLoop();
if (getLangOpts().CPlusPlus)
return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
return isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdInParens, isAmbiguous);
isAmbiguous = false;
return isTypeSpecifierQualifier();
}
bool isTypeIdInParens() {
bool isAmbiguous;
return isTypeIdInParens(isAmbiguous);
}
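// For illustration, the parenthesized type-id vs. expression ambiguity:
//   sizeof(int);   // '(int)' contains a type-id
//   sizeof(x);     // '(x)' contains an expression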
/// Checks whether the current tokens form a type-id or an expression.
/// It is similar to isTypeIdInParens but does not assume that the type-id
/// is in parentheses.
bool isTypeIdUnambiguously() {
bool IsAmbiguous;
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration and an expression statement when parsing function
/// bodies. Returns true for a declaration, false for an expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration and an expression-statement.
/// If a parsing error is encountered during disambiguation, the function
/// returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as an expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator and
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for a function declarator and false for a constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If a parsing error is encountered during disambiguation, the function
/// returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
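// Illustrative C++17 forms for each outcome ('compute' and 'elems' are
// hypothetical names):
//   if (x == 0) { }                      // Expression
//   if (int y = compute()) { }           // ConditionDecl
//   if (int y = compute(); y > 0) { }    // InitStmtDecl
//   for (auto &e : elems) { }            // ForRangeDecl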
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
bool isAmbiguous;
return isCXXTypeId(Context, isAmbiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
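// Illustrative tentative-parsing ambiguity:
//   int(x);       // a declaration of 'x', despite looking like a cast
//   int(x) + 1;   // an expression: a function-style cast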
/// Determine whether we could have an enum-base.
///
/// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise
/// only consider this to be an enum-base if the next token is a '{'.
///
/// \return \c false if this cannot possibly be an enum base; \c true
/// otherwise.
bool isEnumBase(bool AllowSemi);
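// Illustrative enum-base forms (a sketch):
//   enum E : unsigned { A };   // enum-base followed by '{'
//   enum F : unsigned;         // opaque-enum-declaration (needs AllowSemi)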
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an '(' after an 'explicit' keyword is part of a C++20
/// 'explicit(bool)' declaration, in earlier language modes where that is an
/// extension.
TPResult isExplicitBool();
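// Illustrative C++20 form recognized here (an extension in earlier modes):
//   struct S { explicit(sizeof(int) == 4) S(int); };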
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
/// Try to skip a possibly empty sequence of 'attribute-specifier's without
/// full validation of the syntactic structure of attributes.
bool TrySkipAttributes();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
assert(Tok.is(tok::l_square));
if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
return false;
return DiagnoseProhibitedCXX11Attribute();
}
bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation) {
if (!standardAttributesAllowed())
return;
if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
Tok.isNot(tok::kw_alignas))
return;
DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clear();
}
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear in certain syntactic locations
// which the standard permits but we don't yet support, for example attributes
// that appertain to decl-specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
ParsedAttributes attrs(AttrFactory);
SourceLocation endLoc;
ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute))
ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseCXX11Attributes(Declarator &D) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation endLoc;
ParseCXX11Attributes(attrs, &endLoc);
D.takeAttributes(attrs, endLoc);
}
}
bool MaybeParseCXX11Attributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrsWithRange(AttrFactory);
ParseCXX11Attributes(attrsWithRange, endLoc);
attrs.takeAllFrom(attrsWithRange);
return true;
}
return false;
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (standardAttributesAllowed() &&
isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr) {
const auto &LO = getLangOpts();
if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParsePUBLIC_DOMAIN_TECHNOLOGYFlashbackAttributes(ParsedAttributes &attrs);
void ParsePUBLIC_DOMAIN_TECHNOLOGYMirrorAttributes(ParsedAttributes &attrs);
void ParsePUBLIC_DOMAIN_TECHNOLOGYFunctionAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses the opencl_unroll_hint attribute if the language is OpenCL v2.0
/// or higher.
/// \return false if an error occurs.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
if (getLangOpts().OpenCL)
return ParseOpenCLUnrollHintAttribute(Attrs);
return true;
}
/// Parses the opencl_unroll_hint attribute.
/// \return false if an error occurs.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
ExprResult ParseExtIntegerArgument();
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
bool EnteredScope;
bool CreatedScope;
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
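// Rough usage sketch, assumed from the interface above: when the declarator
// being parsed is qualified, enter its scope so names resolve inside it.
//   DeclaratorScopeObj DeclScopeObj(*this, D.getCXXScopeSpec());
//   if (D.getCXXScopeSpec().isSet())
//     DeclScopeObj.EnterDeclaratorScope();
//   // ... parse the unqualified-id; the destructor exits the scope.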
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
DeclaratorContext DeclaratorContext,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
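// Illustrative disambiguation in Objective-C++, where '[[' may open an
// attribute-specifier or a nested message send:
//   [[nodiscard]] int f();          // attribute-specifier
//   [[obj description] length];     // message send, not an attribute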
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc;
SourceLocation InlineLoc;
SourceLocation IdentLoc;
IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
struct UsingDeclarator {
SourceLocation TypenameLoc;
CXXScopeSpec SS;
UnqualifiedId Name;
SourceLocation EllipsisLoc;
void clear() {
TypenameLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool
ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHadErrors,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse a property kind into \p TIProperty for the selector set \p Set and
/// selector \p Selector.
void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
llvm::omp::TraitSet Set,
llvm::omp::TraitSelector Selector,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector kind into \p TISelector for the selector set \p Set.
void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector set kind into \p TISet.
void parseOMPTraitSetKind(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context property.
void parseOMPContextProperty(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context selector.
void parseOMPContextSelector(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &SeenSelectors);
/// Parses an OpenMP context selector set.
void parseOMPContextSelectorSet(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &SeenSets);
/// Parses OpenMP context selectors.
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);
/// Parse a `match` clause for an '#pragma omp declare variant'. Return true
/// if there was an error.
bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Skip tokens until an `annot_pragma_openmp_end` is found. Emit a warning if
/// it is not the current token.
void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind);
/// Check \p FoundKind against \p ExpectedKind; if they do not match, issue an
/// error that the "end" matching the "begin" directive of kind \p BeginKind
/// was not found. Finally, if the expected kind was found or if
/// \p SkipUntilOpenMPEnd is set, skip ahead using the helper
/// `skipUntilPragmaOpenMPEnd`.
void parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
OpenMPDirectiveKind ExpectedKind,
OpenMPDirectiveKind FoundKind,
SourceLocation MatchingLoc,
SourceLocation FoundLoc,
bool SkipUntilOpenMPEnd);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Tries to parse cast part of OpenMP array shaping operation:
/// '[' expression ']' { '[' expression ']' } ')'.
bool tryParseOpenMPArrayShapingCastPart();
/// Parses a simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses a clause of kind \a CKind for a directive of kind \a DKind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true if this is the first clause of kind \a CKind
/// in the current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param DKind Directive kind.
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
/// Parses and creates OpenMP 5.0 iterators expression:
/// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier =
/// <range-specification> }+ ')'
ExprResult ParseOpenMPIteratorsExpr();
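// Illustrative OpenMP 5.0 iterator expression matching the grammar above
// ('a' and 'n' are hypothetical):
//   #pragma omp task depend(iterator(int i = 0:n), in: a[i])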
/// Parses allocators and traits in the context of the uses_allocator clause.
/// Expected format:
/// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')'
OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind);
public:
/// Parses a simple expression in parentheses for single-expression clauses
/// of OpenMP constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *DepModOrTailExpr = nullptr;
SourceLocation ColonLoc;
SourceLocation RLoc;
CXXScopeSpec ReductionOrMapperIdScopeSpec;
DeclarationNameInfo ReductionOrMapperId;
int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
///< lastprivate clause.
SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
MapTypeModifiers;
SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
MapTypeModifiersLoc;
bool IsMapTypeImplicit = false;
SourceLocation ExtraModifierLoc;
};
/// Parses clauses with a variable list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
bool ObjectHadErrors, bool EnteringContext,
bool AllowDestructorName, bool AllowConstructorName,
bool AllowDeductionGuide,
SourceLocation *TemplateKWLoc, UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
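// Illustrative map clause with map-type-modifiers matching the grammar above
// ('a' and 'b' are hypothetical):
//   #pragma omp target map(always, close, tofrom: a, b)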
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
TPResult isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
bool isTypeConstraintAnnotation();
bool TryAnnotateTypeConstraint();
NamedDecl *
ParseConstrainedTemplateTypeParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true,
bool TypeConstraint = false);
void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
class GNUAsmQualifiers {
unsigned Qualifiers = AQ_unspecified;
public:
enum AQ {
AQ_unspecified = 0,
AQ_volatile = 1,
AQ_inline = 2,
AQ_goto = 4,
};
static const char *getQualifierName(AQ Qualifier);
bool setAsmQualifier(AQ Qualifier);
inline bool isVolatile() const { return Qualifiers & AQ_volatile; }
inline bool isInline() const { return Qualifiers & AQ_inline; }
inline bool isGoto() const { return Qualifiers & AQ_goto; }
};
bool isGCCAsmStatement(const Token &TokAfterAsm) const;
bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};
} // end namespace clang
#endif
|
implied_vol_newton_ver2.c | //
// implied_vol_newton_ver2.c
//
//
// Created by Domenico Natella on 11/3/16.
//
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <mpi.h>
#include <omp.h>
#define SIZE 110
#define MAX_ITERATIONS 1000000
struct option{
double V_market[SIZE][2];
double K[SIZE];
double implied_vol[SIZE];
double T;
double S;
double r;
};
struct tm create_tm(int year, int month, int day){
struct tm my_time = { .tm_year=year, .tm_mon=month, .tm_mday=day,
.tm_hour=0, .tm_min=0, .tm_sec=0 };
return my_time;
}
struct option load(char* filename){
FILE* file = fopen(filename, "r");
struct option op;
fscanf(file, "%lf", &op.S);
char tmp[12],cp[2];
fscanf(file, "%s", tmp);
char s[2] = "/";
char *token;
token = strtok(tmp, s);
int date[3]={0,0,0};
int i = 0;
while( token != NULL ){
date[i] = atoi(token);
token = strtok(NULL, s);
i++;
}
time_t now;
time(&now);
struct tm option_t = create_tm(date[0]-1900, date[1]-1, date[2]);
time_t opt_t_conv = mktime(&option_t);
double diff_t = difftime(opt_t_conv, now);
op.T = (diff_t/86400)/365.; // seconds -> days -> years
i=0;
while(fscanf(file, "%s", tmp)!=EOF){
if(strcmp(tmp, "c")==0 | strcmp(tmp, "p")==0) strcpy(cp,tmp);
else{
op.K[i] = atof(strtok(tmp,s));
op.V_market[i][0] = atof(strtok(NULL,s));
if(strcmp(cp, "c")==0) op.V_market[i][1] = 0.;
else if(strcmp(cp, "p")==0) op.V_market[i][1] = 1.;
}
i++;
}
op.r = 0.03;
return op;
}
double pdf(const double x) {
return (1.0/(pow(2*M_PI,0.5)))*exp(-0.5*x*x);
}
double cdf(double x){
// cumulative distribution function of the standard normal
// (rational approximation); RT2PI = sqrt(2*pi), since 4*acos(0) = 2*pi
double RT2PI = sqrt(4.0*acos(0.0));
static const double SPLIT = 7.07106781186547;
static const double N0 = 220.206867912376;
static const double N1 = 221.213596169931;
static const double N2 = 112.079291497871;
static const double N3 = 33.912866078383;
static const double N4 = 6.37396220353165;
static const double N5 = 0.700383064443688;
static const double N6 = 3.52624965998911e-02;
static const double M0 = 440.413735824752;
static const double M1 = 793.826512519948;
static const double M2 = 637.333633378831;
static const double M3 = 296.564248779674;
static const double M4 = 86.7807322029461;
static const double M5 = 16.064177579207;
static const double M6 = 1.75566716318264;
static const double M7 = 8.83883476483184e-02;
const double z = fabs(x);
double c = 0.0;
if(z<=37.0){
const double e = exp(-z*z/2.0);
if(z<SPLIT){
const double n = (((((N6*z + N5)*z + N4)*z + N3)*z + N2)*z + N1)*z + N0;
const double d = ((((((M7*z + M6)*z + M5)*z + M4)*z + M3)*z + M2)*z + M1)*z + M0;
c = e*n/d;}
else{
const double f = z + 1.0/(z + 2.0/(z + 3.0/(z + 4.0/(z + 13.0/20.0))));
c = e/(RT2PI*f);}
}
return x<=0.0 ? c : 1-c;
}
double d_j(int j, double S, double K, double r, double sigma, double T){
double d1 = (log(S/K) + (r + 0.5*sigma*sigma)*T)/(sigma*(pow(T,0.5)));
if(j==1) return d1;
else return d1-sigma*pow(T,0.5);
}
double call_price(double S, double K, double r, double sigma, double T, double type){
// type==0: Black-Scholes call price; otherwise the put, which uses N(-d2) and N(-d1)
if(type==0.) return S * cdf(d_j(1, S, K, r, sigma, T))-K*exp(-r*T) * cdf(d_j(2, S, K, r,sigma, T));
else return K*exp(-r*T) * cdf(-d_j(2, S, K, r, sigma, T)) - S * cdf(-d_j(1, S, K, r, sigma, T));
}
double call_vega(const double S, const double K, const double r, const double sigma, const double T) {
return S * sqrt(T) * pdf(d_j(1, S, K, r, sigma, T));
}
double newton_raphson(double y_target, double init, double epsilon, double S, double K, double r, double T, double type){
double x = init;
double y = call_price(S, K, r, x, T,type);
int i=0;
while (fabs(y-y_target) > epsilon) {
if(i >= MAX_ITERATIONS) break;
double d_x = call_vega(S, K, r, x, T);
x -= (y-y_target)/d_x; // Newton step: x_{n+1} = x_n - f(x_n)/f'(x_n), with f = price - target
y = call_price(S,K,r,x,T,type);
i++;
}
if(isnan(x)!=0) return 0.;
else return fabs(x);
}
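/* Usage sketch (hypothetical values, not part of the original program):
solve for the volatility that reproduces a call priced at 4.50 with
spot 100, strike 105, r = 3% and half a year to expiry:
double iv = newton_raphson(4.50, 0.3, 0.001, 100., 105., 0.03, 0.5, 0.);
*/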
int main(int argc, char** argv){
// First we create the parameter list
// S: Underlying spot price
// K: Strike price
// r: Risk-free rate (3%, as set in load())
// T: Time to expiry in years (computed from the option's date)
// C_M: Option market price
int rank,size,len=7, num_volatility=14;
double low_vol = 0.3, epsilon = 0.001, t0, t1;
struct option op[len], toReturn[len];
int err = MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (size < 2) {
printf("Requires at least two processes (one master plus workers).\n");
MPI_Finalize();
exit(-1);
}
int blocklen[6] = {SIZE*2,SIZE,SIZE,1,1,1};
MPI_Datatype types[6] = {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE,MPI_DOUBLE,MPI_DOUBLE,MPI_DOUBLE};
MPI_Datatype mpi_op_type;
MPI_Aint offsets[6];
offsets[0] = offsetof(struct option, V_market);
offsets[1] = offsetof(struct option, K);
offsets[2] = offsetof(struct option, implied_vol);
offsets[3] = offsetof(struct option, T);
offsets[4] = offsetof(struct option, S);
offsets[5] = offsetof(struct option, r);
MPI_Type_create_struct(6, blocklen, offsets, types, &mpi_op_type);
MPI_Type_commit(&mpi_op_type);
int div = len/(size-1), mod = len%(size-1), k, i, j, count;
if(rank == 0){
op[0] = load("./OPT_AAPL/Options_20161118.txt");
op[1] = load("./OPT_AAPL/Options_2017120.txt");
op[2] = load("./OPT_AAPL/Options_2017317.txt");
op[3] = load("./OPT_AAPL/Options_2017421.txt");
op[4] = load("./OPT_AAPL/Options_2017616.txt");
op[5] = load("./OPT_AAPL/Options_20171117.txt");
op[6] = load("./OPT_AAPL/Options_2018119.txt");
count=1;
for (k=0; k<(len-mod); k+=div) {
MPI_Send(&op[k], div, mpi_op_type, count, 0, MPI_COMM_WORLD);
count++;
}
count=1;
for (k=(len-mod); k<len; k++) {
MPI_Send(&op[k], 1, mpi_op_type, count, 0, MPI_COMM_WORLD);
count++;
}
int tmp=0, tmp_uno=0, tmp_due;
for (k=1; k<size; k++) {
if (k<=mod) tmp = div+1;
else tmp=div;
MPI_Recv(&toReturn, tmp, mpi_op_type, k, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
tmp_due=0;
for (i=tmp_uno; i<(tmp_uno+tmp); i++) {
op[i] = toReturn[tmp_due];
tmp_due++;
}
tmp_uno+=tmp;
}
for(i=0; i<len; i++){
for(j=0; j<num_volatility; j++) printf("Implied vol. for time %.2f is %.2f%% \n", (op[i].T), 100.0*op[i].implied_vol[j]);
}
fflush(stdout);
}else{
MPI_Recv(&op, div, mpi_op_type, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
#pragma omp parallel for default(none) shared(low_vol, epsilon, op, div, num_volatility) private(i) schedule(guided)
for (k=0; k<div; k++) {
for (i=0; i<num_volatility; i++)
op[k].implied_vol[i] = newton_raphson(op[k].V_market[i][0], low_vol, epsilon, op[k].S, op[k].K[i], op[k].r, op[k].T, op[k].V_market[i][1]);
}
if(rank<=mod){
MPI_Status status;
MPI_Probe(0, 0, MPI_COMM_WORLD, &status);
MPI_Get_count(&status, MPI_INT, &count);
if(count>0){
MPI_Recv(&op[div], 1, mpi_op_type, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
#pragma omp parallel for default(none) shared(low_vol, epsilon, op, div, num_volatility) schedule(guided)
for (i=0; i<num_volatility; i++)
op[div].implied_vol[i] = newton_raphson(op[div].V_market[i][0], low_vol, epsilon, op[div].S, op[div].K[i], op[div].r, op[div].T, op[div].V_market[i][1]);
}
}
if(rank<=mod) MPI_Send(&op, div+1, mpi_op_type, 0, 0, MPI_COMM_WORLD);
else MPI_Send(&op, div, mpi_op_type, 0, 0, MPI_COMM_WORLD);
}
MPI_Type_free(&mpi_op_type);
MPI_Finalize();
return 0;
}
|
nesting-1.c | /* { dg-do compile } */
/* { dg-options "-fopenmp" } */
void
f1 (void)
{
int i, j;
#pragma omp for
for (i = 0; i < 3; i++)
{
#pragma omp for /* { dg-warning "may not be closely nested" } */
for (j = 0; j < 3; j++)
;
#pragma omp sections /* { dg-warning "may not be closely nested" } */
{
;
#pragma omp section
;
}
#pragma omp single /* { dg-warning "may not be closely nested" } */
;
#pragma omp master /* { dg-warning "may not be closely nested" } */
;
#pragma omp barrier /* { dg-warning "may not be closely nested" } */
}
#pragma omp sections
{
#pragma omp for /* { dg-warning "may not be closely nested" } */
for (j = 0; j < 3; j++)
;
#pragma omp sections /* { dg-warning "may not be closely nested" } */
{
;
#pragma omp section
;
}
#pragma omp single /* { dg-warning "may not be closely nested" } */
;
#pragma omp master /* { dg-warning "may not be closely nested" } */
;
#pragma omp section
;
}
#pragma omp single
{
#pragma omp for /* { dg-warning "may not be closely nested" } */
for (j = 0; j < 3; j++)
;
#pragma omp sections /* { dg-warning "may not be closely nested" } */
{
;
#pragma omp section
;
}
#pragma omp single /* { dg-warning "may not be closely nested" } */
;
#pragma omp master /* { dg-warning "may not be closely nested" } */
;
#pragma omp barrier /* { dg-warning "may not be closely nested" } */
}
#pragma omp master
{
#pragma omp for /* { dg-warning "may not be closely nested" } */
for (j = 0; j < 3; j++)
;
#pragma omp sections /* { dg-warning "may not be closely nested" } */
{
;
#pragma omp section
;
}
#pragma omp single /* { dg-warning "may not be closely nested" } */
;
#pragma omp master
;
#pragma omp barrier /* { dg-warning "may not be closely nested" } */
}
#pragma omp task
{
#pragma omp for /* { dg-warning "may not be closely nested" } */
for (j = 0; j < 3; j++)
;
#pragma omp sections /* { dg-warning "may not be closely nested" } */
{
;
#pragma omp section
;
}
#pragma omp single /* { dg-warning "may not be closely nested" } */
;
#pragma omp master /* { dg-warning "may not be closely nested" } */
;
#pragma omp barrier /* { dg-warning "may not be closely nested" } */
}
#pragma omp parallel
{
#pragma omp for
for (j = 0; j < 3; j++)
;
#pragma omp sections
{
;
#pragma omp section
;
}
#pragma omp single
;
#pragma omp master
;
#pragma omp barrier
}
}
void
f2 (void)
{
int i, j;
#pragma omp ordered
{
#pragma omp for /* { dg-warning "may not be closely nested" } */
for (j = 0; j < 3; j++)
;
#pragma omp sections /* { dg-warning "may not be closely nested" } */
{
;
#pragma omp section
;
}
#pragma omp single /* { dg-warning "may not be closely nested" } */
;
#pragma omp master
;
#pragma omp barrier /* { dg-warning "may not be closely nested" } */
}
}
void
f3 (void)
{
#pragma omp critical
{
#pragma omp ordered /* { dg-warning "may not be closely nested" } */
;
}
}
void
f4 (void)
{
#pragma omp task
{
#pragma omp ordered /* { dg-warning "may not be closely nested" } */
;
}
}
void
f5 (void)
{
int i;
#pragma omp for
for (i = 0; i < 10; i++)
{
#pragma omp ordered /* { dg-warning "must be closely nested" } */
;
}
#pragma omp for ordered
for (i = 0; i < 10; i++)
{
#pragma omp ordered
;
}
}
void
f6 (void)
{
#pragma omp critical (foo)
#pragma omp critical (bar)
;
#pragma omp critical
#pragma omp critical (baz)
;
}
void
f7 (void)
{
#pragma omp critical (foo2)
#pragma omp critical
;
#pragma omp critical (bar)
#pragma omp critical (bar) /* { dg-warning "may not be nested" } */
;
#pragma omp critical
#pragma omp critical /* { dg-warning "may not be nested" } */
;
}
|
horn_schunck_pyramidal.c | // This program is free software: you can use, modify and/or redistribute it
// under the terms of the simplified BSD License. You should have received a
// copy of this license along this program. If not, see
// <http://www.opensource.org/licenses/bsd-license.html>.
//
// Copyright (C) 2012, Javier Sánchez Pérez <jsanchez@dis.ulpgc.es>
// All rights reserved.
#ifndef HORN_SCHUNCK_PYRAMIDAL_C
#define HORN_SCHUNCK_PYRAMIDAL_C
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "mask.c"
#include "zoom.c"
#include "xmalloc.c"
#define SOR_EXTRAPOLATION_PARAMETER 1.9
#define INPUT_PRESMOOTHING_SIGMA 0.8
/**
*
* Function to compute the SOR iteration at a given position
* (SOR = Successive Over-Relaxation)
*
*/
static float sor_iteration(
const float *Au, // constant part of the numerator of u
const float *Av, // constant part of the numerator of v
const float *Du, // denominator of u
const float *Dv, // denominator of v
const float *D, // constant part of the numerator
float *u, // x component of the flow
float *v, // y component of the flow
const float al, // alpha smoothness parameter
const int p, // current position
const int p1, // up-left neighbor
const int p2, // up-right neighbor
const int p3, // bottom-left neighbor
const int p4, // bottom-right neighbor
const int p5, // up neighbor
const int p6, // left neighbor
const int p7, // bottom neighbor
const int p8 // right neighbor
)
{
// set the SOR extrapolation parameter
const float w = SOR_EXTRAPOLATION_PARAMETER;
// compute the weighted average of the neighbouring flow values (Laplacian term)
const float ula = 1./12. * (u[p1] + u[p2] + u[p3] + u[p4]) +
1./6. * (u[p5] + u[p6] + u[p7] + u[p8]);
const float vla = 1./12. * (v[p1] + v[p2] + v[p3] + v[p4]) +
1./6. * (v[p5] + v[p6] + v[p7] + v[p8]);
// store the previous values
const float uk = u[p];
const float vk = v[p];
// update the flow
u[p] = (1.0 - w) * uk + w * (Au[p] - D[p] * v[p] + al * ula) / Du[p];
v[p] = (1.0 - w) * vk + w * (Av[p] - D[p] * u[p] + al * vla) / Dv[p];
// return the convergence error
return (u[p] - uk) * (u[p] - uk) + (v[p] - vk) * (v[p] - vk);
}
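/* Reference for the update above (a sketch, not part of the original code):
 *   u <- (1 - w) u + w * (Au - D v + al * ula) / Du
 *   v <- (1 - w) v + w * (Av - D u + al * vla) / Dv
 * where w is the over-relaxation parameter (1 < w < 2 for SOR convergence)
 * and the returned value is the squared size of the update at this pixel. */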
/**
*
* Horn & Schunck method for optical flow estimation at a single scale
*
*/
void horn_schunck_optical_flow(
const float *I1, // source image
const float *I2, // target image
float *u, // x component of optical flow
float *v, // y component of optical flow
const int nx, // image width
const int ny, // image height
const float alpha, // smoothing parameter
const int warps, // number of warpings per scale
const float TOL, // stopping criterion threshold
const int maxiter, // maximum number of iterations
const bool verbose // switch on messages
)
{
if (verbose) fprintf(stderr, "Single-scale Horn-Schunck of a %dx%d "
"image\n\ta=%g nw=%d eps=%g mi=%d v=%d\n", nx, ny,
alpha, warps, TOL, maxiter, verbose);
const int size = nx * ny;
const float alpha2 = alpha * alpha;
//allocate memory
int sf = sizeof(float);
float *I2x = xmalloc(size * sf); // x derivative of I2
float *I2y = xmalloc(size * sf); // y derivative of I2
float *I2w = xmalloc(size * sf); // warping of I2
float *I2wx = xmalloc(size * sf); // warping of I2x
float *I2wy = xmalloc(size * sf); // warping of I2y
float *Au = xmalloc(size * sf); // constant part of numerator of u
float *Av = xmalloc(size * sf); // constant part of numerator of v
float *Du = xmalloc(size * sf); // denominator of u
float *Dv = xmalloc(size * sf); // denominator of v
float *D = xmalloc(size * sf); // common numerator of u and v
// compute the gradient of the second image
gradient(I2, I2x, I2y, nx, ny);
// iterative approximation to the Taylor expansions
for(int n = 0; n < warps; n++)
{
if(verbose) fprintf(stderr, "Warping %d:", n);
// warp the second image and its derivatives
bicubic_interpolation_warp(I2, u, v, I2w, nx, ny, true);
bicubic_interpolation_warp(I2x, u, v, I2wx, nx, ny, true);
bicubic_interpolation_warp(I2y, u, v, I2wy, nx, ny, true);
// store the constant parts of the system
for(int i = 0; i < size; i++)
{
const float I2wl = I2wx[i] * u[i] + I2wy[i] * v[i];
const float dif = I1[i] - I2w[i] + I2wl;
Au[i] = dif * I2wx[i];
Av[i] = dif * I2wy[i];
Du[i] = I2wx[i] * I2wx[i] + alpha2;
Dv[i] = I2wy[i] * I2wy[i] + alpha2;
D[i] = I2wx[i] * I2wy[i];
}
int niter = 0;
float error = 1000;
// iterations of the SOR numerical scheme
while(error > TOL && niter < maxiter)
{
niter++;
error = 0;
//process the central part of the optical flow
#pragma omp parallel for reduction(+:error)
for(int i = 1; i < ny-1; i++)
for(int j = 1; j < nx-1; j++)
{
const int k = i * nx + j;
error += sor_iteration(
Au, Av, Du, Dv, D, u, v, alpha2,
k, k-nx-1, k-nx+1, k+nx-1,
k+nx+1, k-nx, k-1, k+nx, k+1
);
}
// process the first and last rows
for(int j = 1; j < nx-1; j++)
{
// first row
int k = j;
error += sor_iteration(
Au, Av, Du, Dv, D, u, v, alpha2,
k, k-1, k+1, k+nx-1, k+nx+1,
k, k-1, k+nx, k+1
);
// last row
k = (ny-1) * nx + j;
error += sor_iteration(
Au, Av, Du, Dv, D, u, v, alpha2,
k, k-nx-1, k-nx+1, k-1, k+1,
k-nx, k-1, k, k+1
);
}
// process the first and last columns
for(int i = 1; i < ny-1; i++)
{
// first column
int k = i * nx;
error += sor_iteration(
Au, Av, Du, Dv, D, u, v, alpha2,
k, k-nx, k-nx+1, k+nx, k+nx+1,
k-nx, k, k+nx, k+1
);
// last column
k = (i+1) * nx - 1;
error += sor_iteration(
Au, Av, Du, Dv, D, u, v, alpha2,
k, k-nx-1, k-nx, k+nx-1, k+nx,
k-nx, k-1, k+nx, k
);
}
// process the corners
// up-left corner
error += sor_iteration(
Au, Av, Du, Dv, D, u, v, alpha2,
0, 0, 1, nx, nx+1,
0, 0, nx, 1
);
// up-right corner
int k = nx - 1;
error += sor_iteration(
Au, Av, Du, Dv, D, u, v, alpha2,
k, k-1, k, k+nx-1, k+nx,
k, k-1, k+nx, k
);
// bottom-left corner
k = (ny-1) * nx;
error += sor_iteration(
Au, Av, Du, Dv, D, u, v, alpha2,
k, k-nx, k-nx+1,k, k+1,
k-nx, k, k, k+1
);
// bottom-right corner
k = ny * nx - 1;
error += sor_iteration(
Au, Av, Du, Dv, D, u, v, alpha2,
k, k-1, k, k-nx-1, k-nx,
k-nx, k-1, k, k
);
error = sqrt(error / size);
}
if(verbose)
fprintf(stderr, "Iterations %d (%g)\n", niter, error);
}
// free the allocated memory
free(I2x);
free(I2y);
free(I2w);
free(I2wx);
free(I2wy);
free(Au);
free(Av);
free(Du);
free(Dv);
free(D);
}
// compute the largest element of an array
static float max_element(const float *x, int n)
{
int r = 0;
for (int i = 1; i < n; i++)
if (x[i] > x[r])
r = i;
return x[r];
}
// compute the smallest element of an array
static float min_element(const float *x, int n)
{
int r = 0;
for (int i = 1; i < n; i++)
if (x[i] < x[r])
r = i;
return x[r];
}
/**
*
* Function to normalize the images between 0 and 255
*
**/
static void image_normalization(
const float *I1, // first image
const float *I2, // second image
float *I1n, // first normalized image
float *I2n, // second normalized image
int size // size of each image
)
{
// find the max and min of both images
const float max1 = max_element(I1, size);
const float max2 = max_element(I2, size);
const float min1 = min_element(I1, size);
const float min2 = min_element(I2, size);
// obtain the absolute max and min
const float max = max1 > max2 ? max1 : max2;
const float min = min1 < min2 ? min1 : min2;
const float den = max - min;
if(den > 0)
// normalize both images
for(int i = 0; i < size; i++)
{
I1n[i] = 255.0 * (I1[i] - min) / den;
I2n[i] = 255.0 * (I2[i] - min) / den;
}
else
// copy the original images
for(int i = 0; i < size; i++)
{
I1n[i] = I1[i];
I2n[i] = I2[i];
}
}
/**
*
* Procedure to handle the pyramidal approach.
* This procedure relies on the previous functions to calculate
* large optical flow fields using a pyramidal scheme.
*
*/
void horn_schunck_pyramidal(
const float *I1, // source image
const float *I2, // target image
float *u, // x component of optical flow
float *v, // y component of optical flow
const int nx, // image width
const int ny, // image height
const float alpha, // smoothing weight
const int nscales, // number of scales
const float zfactor, // zoom factor
const int warps, // number of warpings per scale
const float TOL, // stopping criterion threshold
const int maxiter, // maximum number of iterations
const bool verbose // switch on messages
)
{
if (verbose) fprintf(stderr, "Multiscale Horn-Schunck of a %dx%d pair"
"\n\ta=%g ns=%d zf=%g nw=%d eps=%g mi=%d\n", nx, ny,
alpha, nscales, zfactor, warps, TOL, maxiter);
int size = nx * ny;
float *I1s[nscales];
float *I2s[nscales];
float *us[nscales];
float *vs[nscales];
int nxx[nscales];
int nyy[nscales];
I1s[0] = xmalloc(size * sizeof(float));
I2s[0] = xmalloc(size * sizeof(float));
// normalize the finest scale images between 0 and 255
image_normalization(I1, I2, I1s[0], I2s[0], size);
// presmoothing the finest scale images
gaussian(I1s[0], nx, ny, INPUT_PRESMOOTHING_SIGMA);
gaussian(I2s[0], nx, ny, INPUT_PRESMOOTHING_SIGMA);
us[0] = u;
vs[0] = v;
nxx[0] = nx;
nyy[0] = ny;
// create the scales
for(int s = 1; s < nscales; s++)
{
zoom_size(nxx[s-1], nyy[s-1], nxx+s, nyy+s, zfactor);
const int sizes = nxx[s] * nyy[s];
I1s[s] = xmalloc(sizes * sizeof(float));
I2s[s] = xmalloc(sizes * sizeof(float));
us[s] = xmalloc(sizes * sizeof(float));
vs[s] = xmalloc(sizes * sizeof(float));
// compute the zoom from the previous finer scale
zoom_out(I1s[s-1], I1s[s], nxx[s-1], nyy[s-1], zfactor);
zoom_out(I2s[s-1], I2s[s], nxx[s-1], nyy[s-1], zfactor);
}
// initialize the flow
for (int i = 0; i < nxx[nscales-1] * nyy[nscales-1]; i++)
{
us[nscales-1][i] = 0;
vs[nscales-1][i] = 0;
}
// pyramidal approximation to the optic flow
for(int s = nscales-1; s >= 0; s--)
{
if(verbose)
fprintf(stderr, "Scale: %d %dx%d\n", s, nxx[s], nyy[s]);
// compute the optical flow at this scale
horn_schunck_optical_flow(
I1s[s], I2s[s], us[s], vs[s], nxx[s], nyy[s],
alpha, warps, TOL, maxiter, verbose
);
// if this was the last scale, finish now
if (!s) break;
// otherwise, upsample the optical flow
// zoom the optic flow for the next finer scale
zoom_in(us[s], us[s-1], nxx[s], nyy[s], nxx[s-1], nyy[s-1]);
zoom_in(vs[s], vs[s-1], nxx[s], nyy[s], nxx[s-1], nyy[s-1]);
// scale the optic flow with the appropriate zoom factor
for(int i = 0; i < nxx[s-1] * nyy[s-1]; i++)
{
us[s-1][i] *= 1.0 / zfactor;
vs[s-1][i] *= 1.0 / zfactor;
}
}
// free the allocated memory
free(I1s[0]);
free(I2s[0]);
for(int i = 1; i < nscales; i++)
{
free(I1s[i]);
free(I2s[i]);
free(us[i]);
free(vs[i]);
}
}
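/* Usage sketch (illustrative parameter values; I1, I2, u and v are assumed
 * to be allocated elsewhere with nx*ny floats each):
 *
 * horn_schunck_pyramidal(I1, I2, u, v, nx, ny,
 *                        7.0f,    // alpha
 *                        5,       // nscales
 *                        0.5f,    // zfactor
 *                        5,       // warps
 *                        0.0001f, // TOL
 *                        150,     // maxiter
 *                        true);   // verbose
 */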
#endif//HORN_SCHUNCK_PYRAMIDAL_C
|
regularizers.h | #ifndef REGULARIZERS_H
#define REGULARIZERS_H
template <typename D, typename I>
class None final : public Regularizer<D, I>
{
public:
typedef typename D::value_type T;
None(const ParamModel<T> &model) : Regularizer<D, I>(model){};
virtual void prox(const D &input, D &output, const T eta) const
{
output.copy(input);
};
inline T eval(const D &input) const { return 0; };
inline T fenchel(D &grad1, D &grad2) const { return 0; };
bool provides_fenchel() const { return false; };
void print() const
{
logging(logINFO) << "No regularization";
}
};
template <typename D, typename I>
class Ridge final : public Regularizer<D, I>
{
public:
typedef typename D::value_type T;
Ridge(const ParamModel<T> &model) : Regularizer<D, I>(model), _lambda(model.lambda_1){};
inline void prox(const D &input, D &output, const T eta) const
{
output.copy(input);
output.scal(T(1.0 / (1.0 + _lambda * eta)));
if (this->_intercept)
{
const int n = input.n();
output[n - 1] = input[n - 1];
}
};
inline T eval(const D &input) const
{
const int n = input.n();
const T res = input.nrm2sq();
return (this->_intercept ? T(0.5) * _lambda * (res - input[n - 1] * input[n - 1]) : T(0.5) * _lambda * res);
};
inline T fenchel(D &grad1, D &grad2) const
{
return (this->_intercept && (abs<T>(grad2[grad2.n() - 1]) >
1e-6))
? INFINITY
: this->eval(grad2) / (_lambda * _lambda);
};
void print() const
{
logging(logINFO) << getName();
}
virtual T strong_convexity() const { return this->_intercept ? 0 : _lambda; };
virtual T lambda_1() const { return _lambda; };
inline void lazy_prox(const D &input, D &output, const Vector<I> &indices, const T eta) const
{
const T scal = T(1.0) / (T(1.0) + _lambda * eta);
const int p = input.n();
const int r = indices.n();
for (int jj = 0; jj < r; ++jj)
output[indices[jj]] = scal * input[indices[jj]];
if (this->_intercept)
output[p - 1] = input[p - 1];
};
virtual bool is_lazy() const { return true; };
static inline std::string getName() { return "L2 regularization"; };
private:
const T _lambda;
};
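// Reference for Ridge::prox above (a sketch of the standard closed form):
//   prox_{eta * (lambda/2)||.||^2}(x) = x / (1 + lambda * eta),
// applied coordinate-wise, with the intercept coordinate left untouched.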
template <typename D, typename I>
class Lasso final : public Regularizer<D, I>
{
public:
typedef typename D::value_type T;
Lasso(const ParamModel<T> &model) : Regularizer<D, I>(model), _lambda(model.lambda_1){};
inline void prox(const D &input, D &output, const T eta) const
{
input.fastSoftThrshold(output, eta * _lambda);
if (this->_intercept)
{
const int n = input.n();
output[n - 1] = input[n - 1];
}
};
inline T eval(const D &input) const
{
const int n = input.n();
const T res = input.asum();
return (this->_intercept ? _lambda * (res - abs<T>(input[n - 1])) : _lambda * res);
};
inline T fenchel(D &grad1, D &grad2) const
{
const T mm = grad2.fmaxval();
if (mm > _lambda)
grad1.scal(_lambda / mm);
return (this->_intercept && (abs<T>(grad2[grad2.n() - 1]) >
1e-6))
? INFINITY
: 0;
};
void print() const
{
logging(logINFO) << getName();
}
virtual T lambda_1() const { return _lambda; };
inline void lazy_prox(const D &input, D &output, const Vector<I> &indices, const T eta) const
{
const int p = input.n();
const int r = indices.n();
const T thrs = _lambda * eta;
//#pragma omp parallel for
for (int jj = 0; jj < r; ++jj)
output[indices[jj]] = fastSoftThrs(input[indices[jj]], thrs);
if (this->_intercept)
output[p - 1] = input[p - 1];
};
virtual bool is_lazy() const { return true; };
static inline std::string getName() { return "L1 regularization"; };
private:
const T _lambda;
};
template <typename D, typename I>
class ElasticNet final : public Regularizer<D, I>
{
public:
typedef typename D::value_type T;
// min_x 0.5|y-x|^2 + lambda_1 |x| + 0.5 lambda_2 x^2
// min_x - y x + 0.5 x^2 + lambda_1 |x| + 0.5 lambda_2 x^2
// min_x - y x + 0.5 (1+lambda_2) x^2 + lambda_1 |x|
// min_x - y/(1+lambda_2) x + 0.5 x^2 + lambda_1/(1+lambda_2) |x|
ElasticNet(const ParamModel<T> &model) : Regularizer<D, I>(model), _lambda(model.lambda_1), _lambda2(model.lambda_2){};
inline void prox(const D &input, D &output, const T eta) const
{
output.copy(input);
output.fastSoftThrshold(_lambda * eta);
output.scal(T(1.0) / (1 + _lambda2 * eta));
if (this->_intercept)
{
const int n = input.n();
output[n - 1] = input[n - 1];
}
};
inline T eval(const D &input) const
{
const int n = input.n();
const T res = _lambda * input.asum() + T(0.5) * _lambda2 * input.nrm2sq();
return (this->_intercept ? res - _lambda * abs<T>(input[n - 1]) - T(0.5) * _lambda2 * input[n - 1] * input[n - 1] : res);
};
// max_x xy - lambda_1 |x| - 0.5 lambda_2 x^2
// - min_x - xy + lambda_1 |x| + 0.5 lambda_2 x^2
// -(1/lambda_2) min_x - xy/lambda_2 + lambda_1/lambda_2 |x| + 0.5 x^2
// x^* = prox_(l1 lambda_1/lambda_2) [ y/lambda_2]
// x^* = prox_(l1 lambda_1) [ y] /_lambda2
inline T fenchel(D &grad1, D &grad2) const
{
D tmp;
tmp.copy(grad2);
grad2.fastSoftThrshold(_lambda);
const int n = grad2.n();
T res0 = _lambda * grad2.asum() / _lambda2 + T(0.5) * grad2.nrm2sq() / _lambda2;
if (this->_intercept)
res0 -= _lambda * abs<T>(grad2[n - 1]) / _lambda2 - T(0.5) * grad2[n - 1] * grad2[n - 1] / _lambda2;
const T res = tmp.dot(grad2) / _lambda2 - res0;
return (this->_intercept && (abs<T>(tmp[tmp.n() - 1]) >
1e-6))
? INFINITY
: res;
};
void print() const
{
logging(logINFO) << getName();
}
virtual T strong_convexity() const { return this->_intercept ? 0 : _lambda2; };
virtual T lambda_1() const { return _lambda; };
inline void lazy_prox(const D &input, D &output, const Vector<I> &indices, const T eta) const
{
const int p = input.n();
const int r = indices.n();
const T thrs = _lambda * eta;
const T scal = T(1.0) / (T(1.0) + _lambda2 * eta);
#pragma omp parallel for
for (int jj = 0; jj < r; ++jj)
output[indices[jj]] = scal * fastSoftThrs(input[indices[jj]], thrs);
if (this->_intercept)
output[p - 1] = input[p - 1];
};
virtual bool is_lazy() const { return true; };
static inline std::string getName() { return "Elastic Net regularization"; };
private:
const T _lambda;
const T _lambda2;
};
template <typename D, typename I>
class L1Ball final : public Regularizer<D, I>
{
public:
typedef typename D::value_type T;
L1Ball(const ParamModel<T> &model) : Regularizer<D, I>(model), _lambda(model.lambda_1){};
inline void prox(const D &input, D &output, const T eta) const
{
D tmp;
tmp.copy(input);
if (this->_intercept)
{
tmp[tmp.n() - 1] = 0;
tmp.sparseProject(output, _lambda, 1, 0, 0, 0, false);
output[output.n() - 1] = input[output.n() - 1];
}
else
{
tmp.sparseProject(output, _lambda, 1, 0, 0, 0, false);
}
};
inline T eval(const D &input) const { return 0; };
inline T fenchel(D &grad1, D &grad2) const
{
Vector<T> output;
output.copy(grad2);
if (this->_intercept)
output[output.n() - 1] = 0;
return _lambda * (output.fmaxval());
};
void print() const
{
logging(logINFO) << getName();
}
virtual T lambda_1() const { return _lambda; };
static inline std::string getName() { return "L1 ball regularization"; };
private:
const T _lambda;
};
template <typename D, typename I>
class L2Ball final : public Regularizer<D, I>
{
public:
typedef typename D::value_type T;
L2Ball(const ParamModel<T> &model) : Regularizer<D, I>(model), _lambda(model.lambda_1){};
inline void prox(const D &input, D &output, const T eta) const
{
D tmp;
tmp.copy(input);
if (this->_intercept)
{
tmp[tmp.n() - 1] = 0;
const T nrm = tmp.nrm2();
if (nrm > _lambda)
tmp.scal(_lambda / nrm);
output[output.n() - 1] = input[output.n() - 1];
}
else
{
const T nrm = tmp.nrm2();
if (nrm > _lambda)
tmp.scal(_lambda / nrm);
}
};
inline T eval(const D &input) const { return 0; };
inline T fenchel(D &grad1, D &grad2) const
{
Vector<T> output;
output.copy(grad2);
if (this->_intercept)
output[output.n() - 1] = 0;
return _lambda * (output.nrm2());
};
void print() const
{
logging(logINFO) << getName();
}
virtual T lambda_1() const { return _lambda; };
static inline std::string getName() { return "L2 ball regularization"; };
private:
const T _lambda;
};
template <typename D, typename I>
class FusedLasso final : public Regularizer<D, I>
{
public:
typedef typename D::value_type T;
FusedLasso(const ParamModel<T> &model) : Regularizer<D, I>(model), _lambda(model.lambda_1), _lambda2(model.lambda_2), _lambda3(model.lambda_3){};
inline void prox(const D &x, D &output, const T eta) const
{
output.resize(x.n());
Vector<T> copyx;
copyx.copy(x);
copyx.fusedProjectHomotopy(output, _lambda2, _lambda, _lambda3, true);
};
inline T eval(const D &x) const
{
T sum = T();
const int maxn = this->_intercept ? x.n() - 1 : x.n();
for (int i = 0; i < maxn - 1; ++i)
sum += _lambda3 * abs(x[i + 1] - x[i]) + _lambda * abs(x[i]) + T(0.5) * _lambda2 * x[i] * x[i];
sum += _lambda * abs(x[maxn - 1]) + T(0.5) * _lambda2 * x[maxn - 1] * x[maxn - 1]; // last entry gets the same L1/L2 weights as the loop above
return sum;
};
inline T fenchel(D &grad1, D &grad2) const { return 0; };
void print() const
{
logging(logINFO) << getName();
}
bool provides_fenchel() const { return false; };
virtual T strong_convexity() const { return this->_intercept ? 0 : _lambda3; };
virtual T lambda_1() const { return _lambda; };
static inline std::string getName() { return "Fused Lasso regularization"; };
private:
const T _lambda;
const T _lambda2;
const T _lambda3;
};
#endif
|
GB_binop__min_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__min_int8
// A.*B function (eWiseMult): GB_AemultB__min_int8
// A*D function (colscale): GB_AxD__min_int8
// D*A function (rowscale): GB_DxB__min_int8
// C+=B function (dense accum): GB_Cdense_accumB__min_int8
// C+=b function (dense accum): GB_Cdense_accumb__min_int8
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__min_int8
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__min_int8
// C=scalar+B GB_bind1st__min_int8
// C=scalar+B' GB_bind1st_tran__min_int8
// C=A+scalar GB_bind2nd__min_int8
// C=A'+scalar GB_bind2nd_tran__min_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_IMIN (aij, bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_IMIN (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_INT8 || GxB_NO_MIN_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__min_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__min_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__min_int8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__min_int8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__min_int8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__min_int8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__min_int8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__min_int8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__min_int8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t bij = Bx [p] ;
Cx [p] = GB_IMIN (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__min_int8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
Cx [p] = GB_IMIN (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_IMIN (x, aij) ; \
}
GrB_Info GB_bind1st_tran__min_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_IMIN (aij, y) ; \
}
GrB_Info GB_bind2nd_tran__min_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
dmi.c | #include "clib.h"
#include "math.h"
#include "stdlib.h"
/* NOTES about the neighbours array in the arguments:
The first 6 elements of the ngbs[] are the indexes of the nearest neighbours in
the following order:
-x, +x, -y, +y, -z, +z
for every spin. It is -1 for boundaries. The array is like:
__here are next neighbour indexes
|
| 0-x, 0+x, 0-y, 0+y, 0-z, 0+z, ... 1-x, 1+x, 1-y, ... |
i=0 i=1 ...
where 0-y is the index of the neighbour of the 0th spin, in the -y direction,
for example
Thus, for every nearest neighbour ( ngbs[i + j], j=0,1,...5 ) we compute the
field contribution from the corresponding DMI
The ngbs array also gives the correct indexes for the spins at periodic
boundaries
*/
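/* For example (a sketch; n_ngbs is the per-spin stride described above):
 *   int j_plus_y = ngbs[n_ngbs * i + 3]; // index of the +y neighbour of spin i
 *   if (j_plus_y >= 0) { ... }           // -1 means no neighbour there
 */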
void dmi_field_bulk(double *restrict spin, double *restrict field,
double *restrict mu_s_inv,
double *restrict energy, double *restrict _D,
int *restrict ngbs, int nxyz, int n_ngbs) {
/* Bulk DMI field and energy computation
*
* The Bulk DMI Dzyaloshinskii vectors point in the same direction as the
* r_ij vectors which connect two neighbouring sites, pointing from the
* lattice site *i* towards *j*
*
* The bulk DMI is defined in 3 dimensions
*
* Then the DMI field is computed as :
*
* Sum_j ( D_ij X S_j ) = Sum_j ( r_ij X S_j )
*
* for every spin *i*
*
* NOTES:
*
* 1. Check this: can we use a SC lattice for this bulk DMI expression?
* Since, for example, MnSi crystal structure is more complex
*
* 2. This function is not defined for a hexagonal lattice, we can
* generalise the code to solve this
*
*/
/* The DMI vectors are the same according to the neighbours
* positions. Thus we set them here to avoid compute them
* every time in the loop . So, if we want the j-th NN,
* the DMI vector will be
* (D_x, D_y, D_z) --> ( dmivector[3 * j] ,
* dmivector[3 * j + 1],
* dmivector[3 * j + 2] )
*/
double dmivector[18] = {-1, 0, 0,
1, 0, 0,
0, -1, 0,
0, 1, 0,
0, 0, -1,
0, 0, 1
};
#pragma omp parallel for shared(dmivector)
for (int i = 0; i < nxyz; i++) {
int id = 0;
int id_nn = n_ngbs * i; // index for the neighbours
double fx = 0, fy = 0, fz = 0;
double D=0;
// Compute the DMI contribution for every NEAREST neighbour:
// -x, +x, -y, +y, -z, +z
for (int j = 0; j < 6; j++){
if (ngbs[id_nn + j] >= 0) {
id = 3 * ngbs[id_nn + j];
D = _D[id_nn + j];
fx += D * cross_x(dmivector[3 * j],
dmivector[3 * j + 1],
dmivector[3 * j + 2],
spin[id], spin[id+1], spin[id+2]);
fy += D * cross_y(dmivector[3 * j],
dmivector[3 * j + 1],
dmivector[3 * j + 2],
spin[id], spin[id+1], spin[id+2]);
fz += D * cross_z(dmivector[3 * j],
dmivector[3 * j + 1],
dmivector[3 * j + 2],
spin[id], spin[id+1], spin[id+2]);
}
}
field[3 * i] = fx;
field[3 * i + 1] = fy;
field[3 * i + 2] = fz;
energy[i] = -0.5 * (fx * spin[3 * i] +
fy * spin[3 * i + 1] +
fz * spin[3 * i + 2]
);
// Scale field by 1/mu_s
field[3 * i] *= mu_s_inv[i];
field[3 * i + 1] *= mu_s_inv[i];
field[3 * i + 2] *= mu_s_inv[i];
}
}
void dmi_field_interfacial_atomistic(double *restrict spin, double *restrict field,
double *restrict mu_s_inv,
double *restrict energy,
double *restrict _D,
int *restrict ngbs, int n,
int n_ngbs, int n_ngbs_dmi,
double *restrict DMI_vec) {
/* Interfacial DMI field and energy computation
*
* ngbs[] contains the *indexes* of the neighbours
* for different lattices. The number of neighbours is specified with the
* n_ngbs integer.
* The number of neighbours used to compute the 2D interfacial DMI is
* given by the n_dmi_ngbs (e.g. 4 in a square lattice, 6 in hex lattice)
*
* The Interfacial DMI needs the direction of the Dzyaloshinskii vectors,
* which are obtained as
*
* D_ij = r_ij X +z
*
* where r_ij is the vector connecting two neighbouring sites.
* This vectors are computed in Python and passed here as the DMI_vec array,
* whose entries are according to the neighbours matrix order, i.e.
* the (DMI_vec[3 * j], DMI_vec[3 * j + 1], DMI_vec[3 * j + 2]) vector
* are the Dij vector components between the i-th site and the j-th neighbour
* (we assume the vectors are the same between NNs for every lattice site)
*
*
* For instance, in a SC 2D lattice, the X -th spin has the DMI vectors:
* [ lattice site X, neighbours O ]
*
* O
* .
* ---> D_ij
* .
* ^ . ^ y
* O . | . . X . . | . O |
* . v |---> x
* .
* <---
* .
* O
*
* so DMI_vec = {0, 1, 0, 0, -1, 0, -1, 0, 0, 1, 0, 0}
* NN: j=0 j=1 j=2 j=3
*
* This DMI is only defined in two dimensions--> interfaces, thin films
* Then the DMI field is computed as : Sum_j ( D_ij X S_j )
* for every spin *i*
*
* In a hexagonal lattice we have 6 neighbours
*
* We assume a constant DMI vector magnitude
*/
#pragma omp parallel for
for (int i = 0; i < n; i++) {
int idn = n_ngbs * i; // index for the NNs
int idnm = 0; // index for the magnetisation components of the NNs
double fx = 0, fy = 0, fz = 0;
double D=0;
/* Now we compute the field contribution from every NEAREST neighbour in the plane */
for(int j = 0; j < n_ngbs_dmi; j++){
/* The neighbour exists if its index is >= 0 (ngbs stores -1 at
* open boundaries and at sites with mu_s = 0) */
if (ngbs[idn + j] >= 0) {
/* Now we add the field contribution of the j-th
* neighbour for the i-th spin: D_ij X S_j */
idnm = 3 * ngbs[idn + j]; // Magnetisation component index of the j-th NN
D = _D[idn + j]; // DMI constant of the (i, j) bond, indexed per neighbour as in the bulk case
fx += D * cross_x(DMI_vec[3 * j], DMI_vec[3 * j + 1], DMI_vec[3 * j + 2],
spin[idnm], spin[idnm + 1], spin[idnm + 2]);
fy += D * cross_y(DMI_vec[3 * j], DMI_vec[3 * j + 1], DMI_vec[3 * j + 2],
spin[idnm], spin[idnm + 1], spin[idnm + 2]);
fz += D * cross_z(DMI_vec[3 * j], DMI_vec[3 * j + 1], DMI_vec[3 * j + 2],
spin[idnm], spin[idnm + 1], spin[idnm + 2]);
}
}
field[3 * i] = fx;
field[3 * i + 1] = fy;
field[3 * i + 2] = fz;
// TODO: check whether the energy is correct or not.
/* Avoid second counting with 1/2 */
energy[i] = -0.5 * (fx * spin[3 * i] +
fy * spin[3 * i + 1] +
fz * spin[3 * i + 2]
);
// Scale field by 1/mu_s
field[3 * i] *= mu_s_inv[i];
field[3 * i + 1] *= mu_s_inv[i];
field[3 * i + 2] *= mu_s_inv[i];
}
}
inline double single_energy_x(double D, double Si[3], double Sj[3]){
double tx = D*(-Si[2]*Sj[1]+Si[1]*Sj[2]);
return tx;
}
inline double single_energy_y(double D, double Si[3], double Sj[3]){
double ty = D*(Si[2]*Sj[0]-Si[0]*Sj[2]);
return ty;
}
inline double single_energy_z(double D, double Si[3], double Sj[3]){
double tz = D*(-Si[1]*Sj[0]+Si[0]*Sj[1]);
return tz;
}
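/* The three helpers above are the x, y and z components of D * (S_i x S_j),
i.e. the DMI energy of a bond whose Dzyaloshinskii vector points along the
corresponding axis; dmi_energy below sums them over the +x, +y and +z bonds. */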
double dmi_energy(double *restrict spin, double D, int nx, int ny, int nz, int xperiodic, int yperiodic) {
int nyz = ny * nz;
int n1 = nx * nyz, n2 = 2 * n1;
int i, j, k;
int index, id;
double energy = 0;
double S_i[3],S_j[3];
for (i = 0; i < nx; i++) {
for (j = 0; j < ny; j++) {
for (k = 0; k < nz; k++) {
index = nyz * i + nz * j + k;
S_i[0] = spin[index];
S_i[1] = spin[index + n1];
S_i[2] = spin[index + n2];
if (i < nx - 1 || xperiodic) {
id = index + nyz;
if (i == nx -1){
id -= n1;
}
S_j[0] = spin[id];
S_j[1] = spin[id + n1];
S_j[2] = spin[id + n2];
energy += single_energy_x(D,S_i,S_j);
}
if (j < ny - 1 || yperiodic) {
id = index + nz;
if (j == ny-1){
id -= nyz;
}
S_j[0] = spin[id];
S_j[1] = spin[id + n1];
S_j[2] = spin[id + n2];
energy += single_energy_y(D,S_i,S_j);
}
if (k < nz - 1) {
id = index + 1;
S_j[0] = spin[id];
S_j[1] = spin[id + n1];
S_j[2] = spin[id + n2];
energy += single_energy_z(D,S_i,S_j);
}
}
}
}
return energy;
}
|
GB_binop__rdiv_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_uint8)
// A*D function (colscale): GB (_AxD__rdiv_uint8)
// D*A function (rowscale): GB (_DxB__rdiv_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_uint8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_uint8)
// C=scalar+B GB (_bind1st__rdiv_uint8)
// C=scalar+B' GB (_bind1st_tran__rdiv_uint8)
// C=A+scalar GB (_bind2nd__rdiv_uint8)
// C=A'+scalar GB (_bind2nd_tran__rdiv_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 8)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_UNSIGNED (y, x, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_UINT8 || GxB_NO_RDIV_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rdiv_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (bij, x, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (y, aij, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 8) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 8) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pd_tv_mex.c | #include <inttypes.h>
#include <math.h>
#include <omp.h>
#include "mex.h"
#define MIN_VERBOSE (4)
#define CONVERGENCE_MOD (10)
/* the preprocessor can't compare floating-point numbers, so the integer */
/* flag THETA0 selects the extrapolation mode: */
/* THETA0 = 1 -> behaves as THETA = 0.0 (chi_/eta_ alias chi/eta) */
/* THETA0 = 0 -> use THETA below */
#define THETA0 (0)
#define THETA (1.0)
#define OI (1 << 1) /* referenced via OUTSIDE */
#define BI (1 << 2) /* unused */
#define FI (1 << 3) /* unused */
#define CI (1 << 4) /* referenced via INSIDE */
#define OJ (1 << 5) /* referenced via OUTSIDE */
#define BJ (1 << 6) /* unused */
#define FJ (1 << 7) /* unused */
#define CJ (1 << 8) /* referenced via INSIDE */
#define OK (1 << 9) /* referenced via OUTSIDE */
#define BK (1 << 10) /* unused */
#define FK (1 << 11) /* unused */
#define CK (1 << 12) /* referenced via INSIDE */
#define INSIDE (CI + CJ + CK)
#define OUTSIDE (OI + OJ + OK)
#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b))
struct PD
{
/* primal */
void *chi;
void *eta;
void *chi_;
void *eta_;
/* dual */
void *nu;
void *p;
/* misc */
void *f;
void *m;
void *mi;
void *mb; /* neighbor bitmask; filled by init_bitmask, not read by the update kernels */
void *chi0;
void *eta0;
};
void mx_pdd(struct PD *mxpd,
const double lam, const double *h,
const double tol, const uint32_t maxiter, const uint32_t verbose);
void mx_pdf(struct PD *mxpd,
const double lam, const double *h,
const double tol, const uint32_t maxiter, const uint32_t verbose);
void update_duald(struct PD *pd,
const double lam, const double sigma,
const double *h, const size_t *sz);
void update_dualf(struct PD *pd,
const double lam, const double sigma,
const double *h, const size_t *sz);
void update_primald(struct PD *pd,
const double tau, const double *h, const size_t *sz);
void update_primalf(struct PD *pd,
const double tau, const double *h, const size_t *sz);
uint8_t convergence_checkd(double *nr1, double *nr2,
const double *chi, const double *chi0,
const double *eta, const double *eta0,
const double tol, const size_t N);
uint8_t convergence_checkf(double *nr1, double *nr2,
const float *chi, const float *chi0,
const float *eta, const float *eta0,
const double tol, const size_t N);
void init_bitmask(uint32_t *mb, uint8_t *mi, const uint8_t *m, const size_t *sz);
void mx_init(struct PD *pd, const mxArray *mxf, const mxArray *mxm);
void mx_cleanup(struct PD *pd);
void
mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
if ((nrhs != 7) || (nlhs > 1)) {
mexErrMsgTxt(
"Usage: chi = pd_tv_mex(d2f, mask, vsz, lam, tol, maxiter, verbose);"
);
return;
}
const double *h = (const double *)mxGetData(prhs[2]);
const double lam = (const double)mxGetScalar(prhs[3]);
const double tol = (const double)mxGetScalar(prhs[4]);
const uint32_t maxiter = (const uint32_t)mxGetScalar(prhs[5]);
const uint32_t verbose = (const uint32_t)mxGetScalar(prhs[6]);
struct PD mxpd;
mx_init(&mxpd, prhs[0], prhs[1]);
if (mxIsSingle(prhs[0])) {
mx_pdf(&mxpd, lam, h, tol, maxiter, verbose);
} else {
mx_pdd(&mxpd, lam, h, tol, maxiter, verbose);
}
plhs[0] = mxDuplicateArray(mxpd.chi);
mx_cleanup(&mxpd);
return;
}
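/*
 * Illustrative MATLAB call (a sketch; the array sizes and parameter values
 * are hypothetical, the argument order follows the usage string above):
 *
 *   d2f  = randn(64, 64, 64);         % data term, single or double
 *   mask = ones(64, 64, 64, 'uint8'); % uint8 region-of-interest mask
 *   vsz  = [1.0 1.0 1.0];             % voxel size h
 *   chi  = pd_tv_mex(d2f, mask, vsz, 1e-3, 1e-5, 1000, 1);
 */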
void
mx_pdf(struct PD *mxpd,
const double lam, const double *h,
const double tol, const uint32_t maxiter, const uint32_t verbose)
{
uint32_t i, l, pmod;
double nr1, nr2;
const size_t N = (const size_t)mxGetNumberOfElements(mxpd->f);
const size_t *sz = (const size_t *)mxGetDimensions(mxpd->f);
struct PD pd;
pd.chi = (float *)mxGetData(mxpd->chi);
pd.eta = (float *)mxGetData(mxpd->eta);
pd.chi_ = (float *)mxGetData(mxpd->chi_);
pd.eta_ = (float *)mxGetData(mxpd->eta_);
pd.nu = (float *)mxGetData(mxpd->nu);
pd.p = (float *)mxGetData(mxpd->p);
pd.f = (float *)mxGetData(mxpd->f);
pd.m = (uint8_t *)mxGetData(mxpd->m);
pd.mi = (uint8_t *)mxGetData(mxpd->mi);
pd.mb = (uint32_t *)mxGetData(mxpd->mb);
pd.chi0 = (float *)mxGetData(mxpd->chi0);
pd.eta0 = (float *)mxGetData(mxpd->eta0);
float *f = pd.f;
float *chi = pd.chi;
float *eta = pd.eta;
float *chi0 = pd.chi0;
float *eta0 = pd.eta0;
double gnrm = 4.0 * (1.0/(h[0]*h[0]) + 1.0/(h[1]*h[1]) + 1.0/(h[2]*h[2]));
double wnrm = 6.0 * (1.0/(h[0]*h[0]) + 1.0/(h[1]*h[1]) + 2.0/(h[2]*h[2])) / 3.0;
double K2 = gnrm*gnrm + wnrm*wnrm;
const double tau = 1/sqrt(K2);
const double sigma = 1/(tau*K2);
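/* the step sizes are coupled so that tau*sigma*K2 == 1, the usual condition
   for primal-dual (Chambolle-Pock type) schemes, with K2 an estimate of the
   squared operator norm of the linear map */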
#pragma omp parallel for private(l) schedule(static)
for(l = 0; l < N; l += 1) {
f[l] = ((float)sigma)*f[l];
}
uint8_t flag = 0;
nr1 = 1.0;
nr2 = 1.0;
pmod = maxiter / MIN(MAX(verbose, MIN_VERBOSE), maxiter);
if (verbose) {
mexPrintf("Iter\t\t||delta chi||\t||delta eta||\n");
}
for(i = 0; i < maxiter; ++i) {
if (((i+1) % CONVERGENCE_MOD) == 0) {
#pragma omp parallel for private(l) schedule(static)
for(l = 0; l < N; l += 1) {
chi0[l] = chi[l];
eta0[l] = eta[l];
}
}
update_dualf(&pd, lam, sigma, h, sz);
update_primalf(&pd, tau, h, sz);
if (((i+1) % CONVERGENCE_MOD) == 0) {
flag = convergence_checkf(&nr1, &nr2, chi, chi0, eta, eta0, tol, N);
}
if (verbose && (i == 0 || i == maxiter-1 || ((i+1) % pmod) == 0 || flag)) {
mexPrintf("%5d/%d\t%.3e\t%.3e\n", i+1, maxiter, nr1, nr2);
}
if (flag) {
break;
}
}
return;
}
void
mx_pdd(struct PD *mxpd,
const double lam, const double *h,
const double tol, const uint32_t maxiter, const uint32_t verbose)
{
uint32_t i, l, pmod;
double nr1, nr2;
const size_t N = (const size_t)mxGetNumberOfElements(mxpd->f);
const size_t *sz = (const size_t *)mxGetDimensions(mxpd->f);
struct PD pd;
pd.chi = (double *)mxGetData(mxpd->chi);
pd.eta = (double *)mxGetData(mxpd->eta);
pd.chi_ = (double *)mxGetData(mxpd->chi_);
pd.eta_ = (double *)mxGetData(mxpd->eta_);
pd.nu = (double *)mxGetData(mxpd->nu);
pd.p = (double *)mxGetData(mxpd->p);
pd.f = (double *)mxGetData(mxpd->f);
pd.m = (uint8_t *)mxGetData(mxpd->m);
pd.mi = (uint8_t *)mxGetData(mxpd->mi);
pd.mb = (uint32_t *)mxGetData(mxpd->mb);
pd.chi0 = (double *)mxGetData(mxpd->chi0);
pd.eta0 = (double *)mxGetData(mxpd->eta0);
double *f = pd.f;
double *chi = pd.chi;
double *eta = pd.eta;
double *chi0 = pd.chi0;
double *eta0 = pd.eta0;
double gnrm = 4.0 * (1.0/(h[0]*h[0]) + 1.0/(h[1]*h[1]) + 1.0/(h[2]*h[2]));
double wnrm = 6.0 * (1.0/(h[0]*h[0]) + 1.0/(h[1]*h[1]) + 2.0/(h[2]*h[2])) / 3.0;
double K2 = gnrm*gnrm + wnrm*wnrm;
const double tau = 1/sqrt(K2);
const double sigma = 1/(tau*K2);
#pragma omp parallel for private(l) schedule(static)
for(l = 0; l < N; l += 1) {
f[l] = sigma*f[l];
}
uint8_t flag = 0;
nr1 = 1.0;
nr2 = 1.0;
pmod = maxiter / MIN(MAX(verbose, MIN_VERBOSE), maxiter);
if (verbose) {
mexPrintf("Iter\t\t||delta chi||\t||delta eta||\n");
}
for(i = 0; i < maxiter; ++i) {
if (((i+1) % CONVERGENCE_MOD) == 0) {
#pragma omp parallel for private(l) schedule(static)
for(l = 0; l < N; l += 1) {
chi0[l] = chi[l];
eta0[l] = eta[l];
}
}
update_duald(&pd, lam, sigma, h, sz);
update_primald(&pd, tau, h, sz);
if (((i+1) % CONVERGENCE_MOD) == 0) {
flag = convergence_checkd(&nr1, &nr2, chi, chi0, eta, eta0, tol, N);
}
if (verbose && (i == 0 || i == maxiter-1 || ((i+1) % pmod) == 0 || flag)) {
mexPrintf("%5d/%d\t%.3e\t%.3e\n", i+1, maxiter, nr1, nr2);
}
if (flag) {
break;
}
}
return;
}
/*
* nu <- nu + sigma*(laplace(eta_) - wave(chi_) + f)
* p <- P_{||.||_inf <= lam}(p + sigma*grad(chi_))
*/
void
update_dualf(struct PD *pd,
const double lam, const double sigma,
const double *h, const size_t *sz)
{
size_t i, j, k;
size_t l;
float x;
float *nu = (float *)pd->nu;
float *p = (float *)pd->p;
#if !THETA0
float *chi_ = (float *)pd->chi_;
float *eta_ = (float *)pd->eta_;
#else
float *chi_ = (float *)pd->chi;
float *eta_ = (float *)pd->eta;
#endif
float *f = (float *)pd->f;
uint8_t *m = (uint8_t *)pd->mi;
const size_t nx = sz[0];
const size_t ny = sz[1];
const size_t nz = sz[2];
const size_t nxny = nx*ny;
const size_t NX = nx-1;
const size_t NY = nx*(ny-1);
const size_t NZ = nxny*(nz-1);
const float hx = (float)(sigma/h[0]);
const float hy = (float)(sigma/h[1]);
const float hz = (float)(sigma/h[2]);
const float lhx = (float)(sigma/(h[0]*h[0]));
const float lhy = (float)(sigma/(h[1]*h[1]));
const float lhz = (float)(sigma/(h[2]*h[2]));
const float lhh = -2.0f*(lhx+lhy+lhz);
const float whx = (float)(-sigma/(3.0*h[0]*h[0]));
const float why = (float)(-sigma/(3.0*h[1]*h[1]));
const float whz = (float)( sigma*2.0/(3.0*h[2]*h[2]));
const float whh = -2.0f*(whx+why+whz);
const float lam_ = (float)lam;
const float lam2_ = (float)(lam*lam);
float *p1 = &p[0*(nxny*nz)];
float *p2 = &p[1*(nxny*nz)];
float *p3 = &p[2*(nxny*nz)];
#pragma omp parallel for private(i,j,k,l,x) schedule(static) \
if (nxny*nz > 32*32*32)
for(k = nxny; k < NZ; k += nxny) {
for(j = nx; j < NY; j += nx) {
l = 1 + j + k;
for(i = 1; i < NX; ++i, ++l) {
if (m[l] != 0) {
nu[l] = nu[l] + (
/* laplace(eta_) */
(lhh*eta_[l] +
lhx*(eta_[l-1] + eta_[l+1]) +
lhy*(eta_[l-nx] + eta_[l+nx]) +
lhz*(eta_[l-nxny] + eta_[l+nxny])) -
/* - wave(chi_) */
(whh*chi_[l] +
whx*(chi_[l-1] + chi_[l+1]) +
why*(chi_[l-nx] + chi_[l+nx]) +
whz*(chi_[l-nxny] + chi_[l+nxny])) +
/* + f */
f[l]
);
/* p + sigma*grad(chi_) */
p1[l] = p1[l] + hx*(chi_[l+1]-chi_[l]);
p2[l] = p2[l] + hy*(chi_[l+nx]-chi_[l]);
p3[l] = p3[l] + hz*(chi_[l+nxny]-chi_[l]);
/* p <- P_{||.||_inf <= lam}(p) */
x = p1[l]*p1[l] + p2[l]*p2[l] + p3[l]*p3[l];
if (x > lam2_) {
x = lam_/sqrtf(x);
p1[l] = p1[l] * x;
p2[l] = p2[l] * x;
p3[l] = p3[l] * x;
}
}
}
}
}
return;
}
/*
* nu <- nu + sigma*(laplace(eta_) - wave(chi_) + f)
* p <- P_{||.||_inf <= lam}(p + sigma*grad(chi_))
*/
void
update_duald(struct PD *pd,
const double lam, const double sigma,
const double *h, const size_t *sz)
{
size_t i, j, k;
size_t l;
double x;
double *nu = (double *)pd->nu;
double *p = (double *)pd->p;
#if !THETA0
double *chi_ = (double *)pd->chi_;
double *eta_ = (double *)pd->eta_;
#else
double *chi_ = (double *)pd->chi;
double *eta_ = (double *)pd->eta;
#endif
double *f = (double *)pd->f;
uint8_t *m = (uint8_t *)pd->mi;
const size_t nx = sz[0];
const size_t ny = sz[1];
const size_t nz = sz[2];
const size_t nxny = nx*ny;
const size_t NX = nx-1;
const size_t NY = nx*(ny-1);
const size_t NZ = nxny*(nz-1);
const double hx = sigma/h[0];
const double hy = sigma/h[1];
const double hz = sigma/h[2];
const double lhx = sigma/(h[0]*h[0]);
const double lhy = sigma/(h[1]*h[1]);
const double lhz = sigma/(h[2]*h[2]);
const double lhh = -2.0*(lhx+lhy+lhz);
const double whx = -sigma/(3.0*h[0]*h[0]);
const double why = -sigma/(3.0*h[1]*h[1]);
const double whz = sigma*2.0/(3.0*h[2]*h[2]);
const double whh = -2.0*(whx+why+whz);
const double lam2 = lam*lam;
double *p1 = &p[0*(nxny*nz)];
double *p2 = &p[1*(nxny*nz)];
double *p3 = &p[2*(nxny*nz)];
#pragma omp parallel for private(i,j,k,l,x) schedule(static) \
if (nxny*nz > 32*32*32)
for(k = nxny; k < NZ; k += nxny) {
for(j = nx; j < NY; j += nx) {
l = 1 + j + k;
for(i = 1; i < NX; ++i, ++l) {
if (m[l] != 0) {
nu[l] = nu[l] + (
/* laplace(eta_) */
(lhh*eta_[l] +
lhx*(eta_[l-1] + eta_[l+1]) +
lhy*(eta_[l-nx] + eta_[l+nx]) +
lhz*(eta_[l-nxny] + eta_[l+nxny])) -
/* - wave(chi_) */
(whh*chi_[l] +
whx*(chi_[l-1] + chi_[l+1]) +
why*(chi_[l-nx] + chi_[l+nx]) +
whz*(chi_[l-nxny] + chi_[l+nxny])) +
/* + f */
f[l]
);
/* p + sigma*grad(chi_) */
p1[l] = p1[l] + hx*(chi_[l+1]-chi_[l]);
p2[l] = p2[l] + hy*(chi_[l+nx]-chi_[l]);
p3[l] = p3[l] + hz*(chi_[l+nxny]-chi_[l]);
/* p <- P_{||.||_inf <= lam}(p) */
x = p1[l]*p1[l] + p2[l]*p2[l] + p3[l]*p3[l];
if (x > lam2) {
x = lam/sqrt(x);
p1[l] = p1[l] * x;
p2[l] = p2[l] * x;
p3[l] = p3[l] * x;
}
}
}
}
}
return;
}
/*
* eta <- (eta - tau*laplace(nu)) / (1 + tau)
* chi <- chi - tau*(-div(p) - wave(nu))
*
* eta_ <- eta_n+1 + THETA*(eta_n+1 - eta_n)
* chi_ <- chi_n+1 + THETA*(chi_n+1 - chi_n)
*/
void
update_primalf(struct PD *pd,
const double tau, const double *h, const size_t *sz)
{
size_t i, j, k;
size_t l;
float x, y, z;
float *chi = (float *)pd->chi;
float *eta = (float *)pd->eta;
#if !THETA0
float cn, en;
float *chi_ = (float *)pd->chi_;
float *eta_ = (float *)pd->eta_;
#endif
float *nu = (float *)pd->nu;
float *p = (float *)pd->p;
uint8_t *m = (uint8_t *)pd->m;
const size_t nx = sz[0];
const size_t ny = sz[1];
const size_t nz = sz[2];
const size_t nxny = nx*ny;
const size_t NX = nx-1;
const size_t NY = nx*(ny-1);
const size_t NZ = nxny*(nz-1);
const float hx = (float)(tau/h[0]);
const float hy = (float)(tau/h[1]);
const float hz = (float)(tau/h[2]);
const float lhx = (float)(tau/(h[0]*h[0]));
const float lhy = (float)(tau/(h[1]*h[1]));
const float lhz = (float)(tau/(h[2]*h[2]));
const float lhh = -2.0f*(lhx+lhy+lhz);
const float whx = (float)(-tau/(3.0*h[0]*h[0]));
const float why = (float)(-tau/(3.0*h[1]*h[1]));
const float whz = (float)( tau*2.0/(3.0*h[2]*h[2]));
const float whh = -2.0f*(whx+why+whz);
const float tau1 = (float)(1.0 / (1.0 + tau));
float *p1 = &p[0*(nxny*nz)];
float *p2 = &p[1*(nxny*nz)];
float *p3 = &p[2*(nxny*nz)];
#pragma omp parallel for private(i,j,k,l,x,y,z) schedule(static) \
if (nxny*nz > 32*32*32)
for(k = nxny; k < NZ; k += nxny) {
for(j = nx; j < NY; j += nx) {
l = 1 + j + k;
for(i = 1; i < NX; ++i, ++l) {
if (m[l] != 0) {
/* eta_ <- eta_n+1 + theta*(eta_n+1 - eta_n) */
/* chi_ <- chi_n+1 + theta*(chi_n+1 - chi_n) */
#if !THETA0
en = eta[l];
cn = chi[l];
#endif
x = (nu[l-1] + nu[l+1]);
y = (nu[l-nx] + nu[l+nx]);
z = (nu[l-nxny] + nu[l+nxny]);
/* eta_n+1 <- (eta_n - tau*laplace(nu)) / (1+tau) */
eta[l] = tau1 * (
eta[l] - lhh*nu[l] - lhx*x - lhy*y - lhz*z
);
/* chi_n+1 <- chi_n - tau*(-div(p) - wave(nu)) */
chi[l] = chi[l] + (
/* div(p) */
hx*(p1[l]-p1[l-1]) +
hy*(p2[l]-p2[l-nx]) +
hz*(p3[l]-p3[l-nxny]) +
/* wave(nu) */
(whh*nu[l] + whx*x + why*y + whz*z)
);
/* eta_ <- eta_n+1 + theta*(eta_n+1 - eta_n) */
/* chi_ <- chi_n+1 + theta*(chi_n+1 - chi_n) */
#if !THETA0
eta_[l] = eta[l] + ((float)THETA)*(eta[l] - en);
chi_[l] = chi[l] + ((float)THETA)*(chi[l] - cn);
#endif
}
}
}
}
return;
}
/*
* eta <- (eta - tau*laplace(nu)) / (1 + tau)
* chi <- chi - tau*(-div(p) - wave(nu))
*
* eta_ <- eta_n+1 + THETA*(eta_n+1 - eta_n)
* chi_ <- chi_n+1 + THETA*(chi_n+1 - chi_n)
*/
void
update_primald(struct PD *pd,
const double tau, const double *h, const size_t *sz)
{
size_t i, j, k;
size_t l;
double x, y, z;
double *chi = (double *)pd->chi;
double *eta = (double *)pd->eta;
#if !THETA0
double cn, en;
double *chi_ = (double *)pd->chi_;
double *eta_ = (double *)pd->eta_;
#endif
double *nu = (double *)pd->nu;
double *p = (double *)pd->p;
uint8_t *m = (uint8_t *)pd->m;
const size_t nx = sz[0];
const size_t ny = sz[1];
const size_t nz = sz[2];
const size_t nxny = nx*ny;
const size_t NX = nx-1;
const size_t NY = nx*(ny-1);
const size_t NZ = nxny*(nz-1);
const double hx = tau/h[0];
const double hy = tau/h[1];
const double hz = tau/h[2];
const double lhx = tau/(h[0]*h[0]);
const double lhy = tau/(h[1]*h[1]);
const double lhz = tau/(h[2]*h[2]);
const double lhh = -2.0*(lhx+lhy+lhz);
const double whx = -tau/(3.0*h[0]*h[0]);
const double why = -tau/(3.0*h[1]*h[1]);
const double whz = tau*2.0/(3.0*h[2]*h[2]);
const double whh = -2.0*(whx+why+whz);
const double tau1 = 1.0 / (1.0 + tau);
double *p1 = &p[0*(nxny*nz)];
double *p2 = &p[1*(nxny*nz)];
double *p3 = &p[2*(nxny*nz)];
#pragma omp parallel for private(i,j,k,l,x,y,z) schedule(static) \
if (nxny*nz > 32*32*32)
for(k = nxny; k < NZ; k += nxny) {
for(j = nx; j < NY; j += nx) {
l = 1 + j + k;
for(i = 1; i < NX; ++i, ++l) {
if (m[l] != 0) {
/* eta_ <- eta_n+1 + theta*(eta_n+1 - eta_n) */
/* chi_ <- chi_n+1 + theta*(chi_n+1 - chi_n) */
#if !THETA0
en = eta[l];
cn = chi[l];
#endif
x = (nu[l-1] + nu[l+1]);
y = (nu[l-nx] + nu[l+nx]);
z = (nu[l-nxny] + nu[l+nxny]);
/* eta_n+1 <- (eta_n - tau*laplace(nu)) / (1+tau) */
eta[l] = tau1 * (
eta[l] - lhh*nu[l] - lhx*x - lhy*y - lhz*z
);
/* chi_n+1 <- chi_n - tau*(-div(p) - wave(nu)) */
chi[l] = chi[l] + (
/* div(p) */
hx*(p1[l]-p1[l-1]) +
hy*(p2[l]-p2[l-nx]) +
hz*(p3[l]-p3[l-nxny]) +
/* wave(nu) */
(whh*nu[l] + whx*x + why*y + whz*z)
);
/* eta_ <- eta_n+1 + theta*(eta_n+1 - eta_n) */
/* chi_ <- chi_n+1 + theta*(chi_n+1 - chi_n) */
#if !THETA0
eta_[l] = eta[l] + ((double)THETA)*(eta[l] - en);
chi_[l] = chi[l] + ((double)THETA)*(chi[l] - cn);
#endif
}
}
}
}
return;
}
uint8_t
convergence_checkd(double *nr1, double *nr2,
const double *chi, const double *chi0,
const double *eta, const double *eta0,
const double tol, const size_t N)
{
size_t l;
double n1 = 0.0;
double n2 = 0.0;
double d1 = 0.0;
double d2 = 0.0;
/* naive summation is adequate for this check; */
/* the pairwise-summation norm lives with the multigrid code */
#pragma omp parallel for private(l) \
reduction(+: n1, n2, d1, d2) schedule(static)
for(l = 0; l < N; l += 1) {
d1 += chi[l]*chi[l];
d2 += eta[l]*eta[l];
n1 += (chi[l]-chi0[l]) * (chi[l]-chi0[l]);
n2 += (eta[l]-eta0[l]) * (eta[l]-eta0[l]);
}
n1 = sqrt(n1/d1);
n2 = sqrt(n2/d2);
*nr1 = n1;
*nr2 = n2;
return (uint8_t)((n1 < tol) && (n2 < 2.0*tol));
}
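/*
 * In both precisions the values returned through nr1/nr2 are relative
 * changes accumulated over the last CONVERGENCE_MOD iterations:
 *   nr1 = ||chi - chi0||_2 / ||chi||_2
 *   nr2 = ||eta - eta0||_2 / ||eta||_2
 * and convergence is declared once nr1 < tol and nr2 < 2*tol.
 */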
uint8_t
convergence_checkf(double *nr1, double *nr2,
const float *chi, const float *chi0,
const float *eta, const float *eta0,
const double tol, const size_t N)
{
size_t l;
double n1 = 0.0;
double n2 = 0.0;
double d1 = 0.0;
double d2 = 0.0;
/* naive summation is adequate for this check; */
/* the pairwise-summation norm lives with the multigrid code */
#pragma omp parallel for private(l) \
reduction(+: n1, n2, d1, d2) schedule(static)
for(l = 0; l < N; l += 1) {
d1 += ((double)chi[l]) * ((double)chi[l]);
d2 += ((double)eta[l]) * ((double)eta[l]);
n1 += ((double)(chi[l]-chi0[l])) * ((double)(chi[l]-chi0[l]));
n2 += ((double)(eta[l]-eta0[l])) * ((double)(eta[l]-eta0[l]));
}
n1 = sqrt(n1/d1);
n2 = sqrt(n2/d2);
*nr1 = n1;
*nr2 = n2;
return (uint8_t)((n1 < tol) && (n2 < 2.0*tol));
}
void
init_bitmask(uint32_t *mb, uint8_t *mi, const uint8_t *m, const size_t *sz)
{
size_t i, j, k;
size_t l;
uint8_t b, f, o;
const size_t nx = sz[0];
const size_t ny = sz[1];
const size_t nz = sz[2];
const size_t nxny = nx*ny;
const size_t NX = nx-1;
const size_t NY = nx*(ny-1);
const size_t NZ = nxny*(nz-1);
/*
* 0 b f c
* --------------
* i 1 2 3 4
* j 5 6 7 8
* k 9 10 11 12
*/
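/*
 * Worked example (derived from the code below): for a voxel whose two
 * i-axis neighbors are both inside the mask, f = b = 1, so bits 3 and 2 are
 * set individually and 1 << (1 + 2*1 + 1) sets bit 4 (CI); a voxel with no
 * masked neighbor on any axis accumulates OI|OJ|OK == OUTSIDE and its word
 * in mb is cleared, while CI|CJ|CK == INSIDE marks the fully interior
 * voxels recorded in mi.
 */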
#pragma omp parallel for private(i,j,k,l,b,f,o) schedule(static) \
if (nxny*nz > 32*32*32)
for(k = 0; k <= NZ; k += nxny) {
for(j = 0; j <= NY; j += nx) {
l = j + k;
for(i = 0; i <= NX; ++i, ++l) {
o = 3;
f = (i < NX) && m[l+1];
mb[l] |= f ? (1 << o) : 0;
o = 2;
b = (i > 0) && m[l-1];
mb[l] |= b ? (1 << o) : 0;
o = 1;
mb[l] |= (1 << (o + 2*f + b));
o = 7;
f = (j < NY) && m[l+nx];
mb[l] |= f ? (1 << o) : 0;
o = 6;
b = (j > 0) && m[l-nx];
mb[l] |= b ? (1 << o) : 0;
o = 5;
mb[l] |= (1 << (o + 2*f + b));
o = 11;
f = (k < NZ) && m[l+nxny];
mb[l] |= f ? (1 << o) : 0;
o = 10;
b = (k > 0) && m[l-nxny];
mb[l] |= b ? (1 << o) : 0;
o = 9;
mb[l] |= (1 << (o + 2*f + b));
mb[l] = ((mb[l] & OUTSIDE) == OUTSIDE) ? 0 : mb[l];
mi[l] = ((mb[l] & INSIDE) == INSIDE);
}
}
}
return;
}
void
mx_init(struct PD *pd, const mxArray *mxf, const mxArray *mxm)
{
mxClassID T = mxGetClassID(mxf);
const size_t *sz = (const size_t *)mxGetDimensions(mxf);
const size_t szp[4] = {sz[0], sz[1], sz[2], 3};
pd->chi = mxCreateNumericArray(3, sz, T, mxREAL);
pd->eta = mxCreateNumericArray(3, sz, T, mxREAL);
#if !THETA0
pd->chi_ = mxCreateNumericArray(3, sz, T, mxREAL);
pd->eta_ = mxCreateNumericArray(3, sz, T, mxREAL);
#else
pd->chi_ = NULL;
pd->eta_ = NULL;
#endif
pd->nu = mxCreateNumericArray(3, sz, T, mxREAL);
pd->p = mxCreateNumericArray(4, szp, T, mxREAL);
pd->f = mxDuplicateArray(mxf);
pd->m = mxDuplicateArray(mxm);
pd->mi = mxCreateNumericArray(3, sz, mxUINT8_CLASS, mxREAL);
pd->mb = mxCreateNumericArray(3, sz, mxUINT32_CLASS, mxREAL);
pd->chi0 = mxCreateNumericArray(3, sz, T, mxREAL);
pd->eta0 = mxCreateNumericArray(3, sz, T, mxREAL);
uint8_t *m = (uint8_t *)mxGetData(pd->m);
uint8_t *mi = (uint8_t *)mxGetData(pd->mi);
uint32_t *mb = (uint32_t *)mxGetData(pd->mb);
init_bitmask(mb, mi, m, sz);
return;
}
void
mx_cleanup(struct PD *pd)
{
if (NULL != pd->chi) {
mxDestroyArray(pd->chi);
pd->chi = NULL;
}
if (NULL != pd->eta) {
mxDestroyArray(pd->eta);
pd->eta = NULL;
}
if (NULL != pd->chi_) {
mxDestroyArray(pd->chi_);
pd->chi_ = NULL;
}
if (NULL != pd->eta_) {
mxDestroyArray(pd->eta_);
pd->eta_ = NULL;
}
if (NULL != pd->nu) {
mxDestroyArray(pd->nu);
pd->nu = NULL;
}
if (NULL != pd->p) {
mxDestroyArray(pd->p);
pd->p = NULL;
}
if (NULL != pd->f) {
mxDestroyArray(pd->f);
pd->f = NULL;
}
if (NULL != pd->m) {
mxDestroyArray(pd->m);
pd->m = NULL;
}
if (NULL != pd->mi) {
mxDestroyArray(pd->mi);
pd->mi = NULL;
}
if (NULL != pd->mb) {
mxDestroyArray(pd->mb);
pd->mb = NULL;
}
if (NULL != pd->chi0) {
mxDestroyArray(pd->chi0);
pd->chi0 = NULL;
}
if (NULL != pd->eta0) {
mxDestroyArray(pd->eta0);
pd->eta0 = NULL;
}
return;
}
|
ProyekAkhirProglan.c | /*Advanced Programming Final Project 2021, Even Semester
created by:
1. Miranty Anjani Putri <2006468270>
2. Nisrina Alifah Sauda <2006577523>
3. Salma Dewi Taufiqoh <2006577473>
Advanced Programming - 02
supervised by
Dr. Ruki Harwahyu, ST. MT. MSc.
*/
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<omp.h>
#include<windows.h>
#include<conio.h>
//Admin password = admin
//Global variable declarations
int n; //Number of regions to be entered
int sudahInput = 0, batas[2] = {0}; //Regions entered so far; batas[1] holds the region limit set by the admin
long int votePaslon[4] = {0}; //Vote totals for each candidate pair, at most 4 pairs
int banyakKandidat = 0, jumlahPemilihEligible = 0, golput = 0; /*Number of candidate pairs, overall number of eligible voters (accumulated per region), and invalid/abstained votes (golput)*/
char kandidatPaslon[4][30]; //Names of the candidate pairs
//Node struct for receiving vote input
struct node {
long int suara;
struct node *next;
};
/*Region data struct storing the region name, the number of legal voters in the region,
the number of abstentions, and the valid votes for candidate pairs 1, 2, 3, 4*/
struct dataDaerah {
char namaDaerah[50];
long int paslon1, paslon2, paslon3, paslon4;
long int jmlhPemilihPerDaerah, golput;
};
//Declare struct daerah
struct dataDaerah *daerah;
//Function to create a node (receives the votes for a candidate pair per user input)
struct node* newNode (int data, struct node* nextNode) {
struct node* node = (struct node*)malloc(sizeof(struct node));
node->suara = data;
node->next = nextNode;
return node;
}
//Function to build a linked list (the user's vote input)
struct node* constructList(long int keys[], int n) {
int i;
struct node* head = NULL, *node = NULL;
for (i = n - 1; i >= 0; i--) {
node = newNode(keys[i], node);
head = node;
}
return head;
}
//Function to print the linked list (the user's vote input)
void printList(struct node* head) {
struct node* ptr = head;
while (ptr != NULL) {
printf("%ld -> ", ptr->suara);
ptr = ptr->next;
}
printf("Done");
}
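/* Example (illustrative): constructList((long int[]){10, 20, 30}, 3) builds
the list 10 -> 20 -> 30, which printList renders as
"10 -> 20 -> 30 -> Done". */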
//Function Prototypes
void menu(); //main menu of the program
void menuAdmin(); //admin menu
void passwordAdmin(); //password 'gate' protecting admin access
void pemilihan(); //receives the input data for a region
void thanks(); //thank-you screen shown after each region's data is entered
void sortDaerah(); //sorts the regions in the table A-Z
void printFile(); //saves the user-entered data to a txt file
void show(); //displays the winner standings (main menu option 3)
void tabel(); //shows the entered data plus the data manipulations the admin can perform
/*Manipulations: sort by region (A-Z), update an entry, delete an entry, view previous data, and save the data*/
void descending(); //sorts the candidates to find the winner
void display(); //displays the per-region vote input
void dataSebelum(); //reads the file containing the previous session's input
void ubahData(); //updates an input entry
void hapusData(); //deletes an input entry
void thankYou(); //thank-you screen shown when the user exits the program
int main (){
menu();
}
//This function was written by Miranty Anjani Putri
void menu() {
system("CLS");
printf("\n----------------------------------------------------------------------------\n");
printf(" Selamat datang pada Program Penghitung Suara ");
printf("\n----------------------------------------------------------------------------\n");
printf("\n 1. Input User ");
printf("\n 2. Admin ");
printf("\n 3. Hasil Voting ");
printf("\n 0. Exit ");
printf("\n----------------------------------------------------------------------------\n");
printf("\n\n\t\t Pilihan Anda: ");
switch(getch()) {
fflush(stdin);
case '1':
system("CLS");
// call the user input function
pemilihan();
break;
case '2':
system("CLS");
// call the function so the admin can log in
passwordAdmin();
break;
case '3':
system("CLS");
// call the results function
descending();
break;
case '0':
system("CLS");
thankYou();
exit(0);
default: //If the input is invalid, call the menu module again to accept new input
printf("\n\aPlease enter a valid input !");
printf("\nPress (Enter) to enter input again....");
getch();
system("CLS");
menu();
}
}
//user input function
void pemilihan () {
long int votePerDaerah[4] = {0};
long int sum = 0;
int x = banyakKandidat;
int i, j, k;
fflush(stdin);
if (batas[0] == batas[1]) {
printf("\n---------------------------------------------------------\n");
printf("\n Sebanyak %d data daerah telah diinput \n", n);
printf("\n---------------------------------------------------------\n");
printf("\nTekan Enter untuk kembali ke menu...");
getch();
system("CLS");
menu();
}
//Input the vote data for one region
Input:
system("CLS");
printf("\n\t\t\t\t--------------------------------------\n");
printf("\n\t\t DATA DAERAH \n");
printf("\n\t\t\t\t--------------------------------------\n");
printf("\n");
printf("\t\t\t\t Masukkan nama daerah: \n");
printf("\t\t\t\t ");
scanf(" %[^\n]", (daerah+sudahInput)->namaDaerah);
printf("\t\t\t\t Masukkan jumlah pemilih eligible: \n");
printf("\t\t\t\t ");
scanf("%ld", &(daerah+sudahInput)->jmlhPemilihPerDaerah);
//display the candidate pairs configured by the Admin
printf("\n\t\t\t\t Candidate pairs on the ballot: \n");
if (x == 1) {
for (i = 0; i < x+1; i++) {
printf("\n\t\t\t\t %d. ", i+1);
printf("%s", kandidatPaslon[i]);
}
}
else {
for (i = 0; i < x; i++) {
printf("\n\t\t\t\t %d. ", i+1);
printf("%s", kandidatPaslon[i]);
}
}
//ask for the valid vote count of each candidate pair
for (j = 0; j < x; j++) {
printf("\n\n\t\t\t\tValid votes for pair %d : ", j+1);
scanf("%ld", &votePerDaerah[j]);
}
for (i = 0; i < x; i++) {
votePaslon[i] += votePerDaerah[i];
}
//multithreaded reduction to speed up summing the array
#pragma omp parallel for reduction(+:sum)
for (k = 0; k < 4; k++) {
sum += votePerDaerah[k];
}
if(banyakKandidat == 1 || banyakKandidat == 2) {
struct node* head = constructList(votePerDaerah, 2);
//Store the vote data in this region's struct
(daerah+sudahInput)->paslon1 = votePerDaerah[0];
(daerah+sudahInput)->paslon2 = votePerDaerah[1];
(daerah+sudahInput)->paslon3 = 0;
(daerah+sudahInput)->paslon4 = 0;
daerah[sudahInput].golput = daerah[sudahInput].jmlhPemilihPerDaerah - sum;
printf("\nData yang anda masukkan : \n");
printList(head);
}
else if(banyakKandidat == 3) {
struct node* head = constructList(votePerDaerah, 3);
(daerah+sudahInput)->paslon1 = votePerDaerah[0];
(daerah+sudahInput)->paslon2 = votePerDaerah[1];
(daerah+sudahInput)->paslon3 = votePerDaerah[2];
(daerah+sudahInput)->paslon4 = 0;
(daerah+sudahInput)->golput = ((daerah+sudahInput)->jmlhPemilihPerDaerah - sum);
printf("\nData yang anda masukkan : \n");
printList(head);
}
else if(banyakKandidat == 4) {
struct node* head = constructList(votePerDaerah, 4);
(daerah+sudahInput)->paslon1 = votePerDaerah[0];
(daerah+sudahInput)->paslon2 = votePerDaerah[1];
(daerah+sudahInput)->paslon3 = votePerDaerah[2];
(daerah+sudahInput)->paslon4 = votePerDaerah[3];
(daerah+sudahInput)->golput = (daerah+sudahInput)->jmlhPemilihPerDaerah - sum;
printf("\nData yang anda masukkan : \n");
printList(head);
}
printf("\n\nTekan (Enter) untuk melanjutkan....");
getch();
sudahInput++; //increment the count of regions that have voted
batas[0] = sudahInput;
thanks();
}
void thanks() {
system("CLS");
printf("\n-------------------------------------------------------------\n");
printf("\n TERIMA KASIH TELAH MENGINPUT DATA\n");
printf("\n-------------------------------------------------------------\n");
printf("\n\n\nTekan (Enter) untuk voting selanjutnya.....");
getch();
menu();
}
void passwordAdmin() {
char password[6] = {"admin"}; //the password the admin must enter
char pass[15];
int i, j = 0, k;
int log = 0;
Password:
system("CLS");
printf("\n----------------------------------------------------------\n");
printf("\n SILAHKAN MASUKKAN PASSWORD \n");
printf("\n----------------------------------------------------------\n");
printf("\n\t >>> Password: ");
for (i = 0; i < 5; i++) {
pass[i] = getch();
printf("%c", '*');
}
pass[i] = '\0';
//check the Admin's password input
if((strcmp(pass, password)) != 0) {
j++;
printf("\nPassword yang Anda masukkan salah. Tekan enter untuk mengulang kembali.\n");
if(j == 3) {
printf("\nAnda telah salah memasukkan password sebanyak 3 kali! Mohon menunggu 5 detik!");
printf("\a");
for (k = 1; k <= 5; k++) {
printf("\n%d", k);
Sleep(1000);
}
passwordAdmin();
}
getch();
goto Password;
}
else {
printf("\n\nLOGIN SUCCESFULL");
for (k = 0; k < 6; k++) {
printf(".");
Sleep(200);
}
printf("\nTekan Enter!");
getch();
menuAdmin();
}
}
// admin function
void menuAdmin() {
int n, i;
char kosong[] = {"Empty box"}; //name used for the blank-ballot option when only one pair runs
system("CLS");
printf("\n-------------------------------------------------------------\n");
printf("\n\t\t\t MENU ADMIN \n");
printf("\n-------------------------------------------------------------\n");
printf("\n 1. Memasukkan batas daerah untuk diinput \n");
printf("\n 2. Menampilkan data voting \n");
printf("\n 0. Kembali ke menu \n");
printf("\n\n Note: Mohon isi batas daerah terlebih dahulu (pilihan 1). ");
printf("\n-------------------------------------------------------------\n");
printf("\n\t\t\t>>>Pilihan Anda: ");
switch(getch()) {
fflush(stdin);
case '1':
printf("\n\nMasukkan BATAS DAERAH untuk data yang ingin diinput: ");
scanf("%d", &n);
batas[1] = n;
if (batas[1] < batas[0]) {
//validate the region limit against the data already entered
printf("\n\nThe number of regions already exceeds the limit you entered...");
printf("\nPress Enter to enter the input again...");
getch();
menuAdmin();
}
printf("\nMAKSIMAL input PASANGAN CALON adalah 4.\nMasukkan JUMLAH PASANGAN CALON: ");
scanf("%d", &banyakKandidat);
//validate the number of candidate pairs
if(banyakKandidat < 1 || banyakKandidat > 4) {
printf("\nAnda hanya dapat memasukkan maksimal 4 pasangan calon..");
printf("\nSilahkan masukkan angka 1-4: ");
scanf("%d", &banyakKandidat);
}
printf("\n");
if (banyakKandidat == 1) {
banyakKandidat = 2;
printf("Masukkan nama pasangan calon ke-1: ");
scanf(" %[^\n]", kandidatPaslon[0]);
strcpy(kandidatPaslon[1], kosong);
}
else {
for (i = 0; i < banyakKandidat; i++) {
printf("Masukkan nama pasangan calon ke-%d: ", i+1);
scanf(" %[^\n]", kandidatPaslon[i]);
}
}
//allocate the region array now that the limit is known
daerah = (struct dataDaerah*)malloc(n * sizeof(struct dataDaerah));
break;
case '2':
tabel();
break;
case '0':
menu();
break;
default:
printf("\nMasukkan input yang benar!");
printf("\nTekan Enter untuk memasukkan input kembali...");
getch();
menuAdmin();
}
menu();
}
//shows the entered data and the data-manipulation menu available to the admin
void tabel() {
int input;
system("CLS");
display();
printf("\n\nPilih metode pengurutan yang diinginkan: ");
printf("\n1. Urutkan berdasarkan nama daerah");
printf("\n2. Update data");
printf("\n3. Hapus data");
printf("\n4. Simpan data");
printf("\n5. Lihat data sebelumnya");
printf("\n0. Kembali ke menu admin");
printf("\nInput: ");
scanf("%d", &input);
if (input == 1) {
sortDaerah();
}
else if (input == 2) {
//Update-data function
ubahData();
}
else if (input == 3) {
//Delete-data function
hapusData();
}
else if (input == 4) {
//Save-data function
printFile();
}
else if (input == 5) {
//Read the file with the previous session's data
dataSebelum();
}
else if (input == 0) {
menuAdmin();
}
else {
printf("\nInput Anda salah! Tekan Enter untuk memilih kembali!");
getch();
tabel();
}
printf("Tekan Enter untuk melanjutkan...");
getch();
menuAdmin();
}
//Sorts the entered regions by name, A-Z
void sortDaerah() {
char namaTemp[50];
long int jmlhPemilihTemp, suaraTemp[4], golputTemp;
int i, j;
//selection-style sort: compare entry i against every later entry j
for (i = 0; i < sudahInput; i++) {
for (j = i+1; j < sudahInput; j++) {
if((strcmp((daerah+i)->namaDaerah, (daerah+j)->namaDaerah)) > 0) {
//Swap names
strcpy(namaTemp, (daerah+i)->namaDaerah);
strcpy((daerah+i)->namaDaerah, (daerah+j)->namaDaerah);
strcpy((daerah+j)->namaDaerah, namaTemp);
//Swap voter counts
jmlhPemilihTemp = (daerah+i)->jmlhPemilihPerDaerah;
(daerah+i)->jmlhPemilihPerDaerah = (daerah+j)->jmlhPemilihPerDaerah;
(daerah+j)->jmlhPemilihPerDaerah = jmlhPemilihTemp;
//Swap candidate-pair votes
suaraTemp[0] = (daerah+i)->paslon1;
(daerah+i)->paslon1 = (daerah+j)->paslon1;
(daerah+j)->paslon1 = suaraTemp[0];
suaraTemp[1] = (daerah+i)->paslon2;
(daerah+i)->paslon2 = (daerah+j)->paslon2;
(daerah+j)->paslon2 = suaraTemp[1];
suaraTemp[2] = (daerah+i)->paslon3;
(daerah+i)->paslon3 = (daerah+j)->paslon3;
(daerah+j)->paslon3 = suaraTemp[2];
suaraTemp[3] = (daerah+i)->paslon4;
(daerah+i)->paslon4 = (daerah+j)->paslon4;
(daerah+j)->paslon4 = suaraTemp[3];
//Swap abstention counts
golputTemp = (daerah+i)->golput;
(daerah+i)->golput = (daerah+j)->golput;
(daerah+j)->golput = golputTemp;
}
}
}
tabel();
}
//Displays the data entered so far
void display() {
int i;
char header[8][20] = {"No.", "Region Name", "Eligible Voters", "Pair 1 Votes", "Pair 2 Votes", "Pair 3 Votes", "Pair 4 Votes", "Abstain"};
printf("\n=======================================================================================================================================\n");
printf("%-5s%-20s%-20s%-20s%-20s%-20s%-20s%-20s", header[0], header[1], header[2], header[3], header[4], header[5], header[6], header[7]);
printf("\n=======================================================================================================================================\n");
for (i = 0; i < sudahInput; i++) {
printf("\n");
printf("%-5d%-20s%-20d", i+1, (daerah+i)->namaDaerah, (daerah+i)->jmlhPemilihPerDaerah);
printf("%-20d%-20d%-20d%-20d%-20d", (daerah+i)->paslon1, (daerah+i)->paslon2, (daerah+i)->paslon3, (daerah+i)->paslon4, (daerah+i)->golput);
}
}
void dataSebelum()
{
char p[50]; // buffer for the region name read back from the file
char header[8][20] = {"No.", "Region Name", "Eligible Voters", "Pair 1 Votes", "Pair 2 Votes", "Pair 3 Votes", "Pair 4 Votes", "Abstain"};
int g; // record index read back from the file
long int a,b,c,d,e,f; // numeric fields read back from the file
FILE *fileptr;
system("CLS");
printf("\n=======================================================================================================================================\n");
printf("%-5s%-20s%-20s%-20s%-20s%-20s%-20s%-20s", header[0], header[1], header[2], header[3], header[4], header[5], header[6], header[7]);
printf("\n=======================================================================================================================================\n");
if ((fileptr = fopen ("D:\\hasil_vote.txt", "r"))==NULL)
{
printf("Error, data sebelumnya tidak ditemukan");
exit(1);
}
else {
//Open and read the file from a previous voting session (if one was saved)
while(fscanf(fileptr, "%d %s %ld %ld %ld %ld %ld %ld\n", &g, p, &a, &b, &c, &d, &e, &f) == 8) {
printf("%-5d%-20s%-20ld%-20ld%-20ld%-20ld%-20ld%-20ld\n", g, p, a, b, c, d, e, f);
}
}
fclose(fileptr);
printf("Tekan {Enter) untuk kembali ke tabel vote sekarang...");
getch();
tabel();
}
//Saves the entered data to a txt file
void printFile() {
int i;
FILE *fileptr;
fileptr = fopen("D:\\hasil_vote.txt","w+");
if(fileptr == NULL)
{
printf("Error, Tidak dapat dijalankan..");
tabel();
}
else
{
for(i=0; i<sudahInput; i++) {
fprintf(fileptr, "%d %s %ld %ld %ld %ld %ld %ld\n", i+1, (daerah+i)->namaDaerah, (daerah+i)->jmlhPemilihPerDaerah, (daerah+i)->paslon1, (daerah+i)->paslon2, (daerah+i)->paslon3, (daerah+i)->paslon4, (daerah+i)->golput);
}
}
fclose(fileptr);
}
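/* Illustrative record written by printFile (and read back by dataSebelum):
1 Jakarta 1000 400 300 200 50 50
i.e. index, region name, eligible voters, valid votes for pairs 1-4, and
abstentions. Caveat: dataSebelum reads the name back with %s, so region
names containing spaces will not round-trip. */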
//results function
void descending() { //sorts the candidate pairs so the winner comes first
int i, j;
char input;
long int temp;
char swap[50];
for (i=0; i< banyakKandidat ; ++i){
for (j=i+1 ; j<banyakKandidat ; ++j){
if (votePaslon[i] < votePaslon[j]){
temp = votePaslon[i];
votePaslon[i]=votePaslon[j];
votePaslon[j]=temp;
strcpy(swap, kandidatPaslon[i]);
strcpy(kandidatPaslon[i], kandidatPaslon[j]);
strcpy(kandidatPaslon[j], swap);
}
}
}
show();
printf("\n\nApakah Anda ingin menyelesaikan Program?\n ");
printf("(Y/N)\n\n");
scanf(" %c",&input);
if(input=='y' || input =='Y')
{
printFile();
thanks();
}
else
menu();
}
//displays the sorted results from descending()
void show(){
int i;
char header[3][30] = {"No.", "Winner Name", "Valid Votes"};
printf("\n===============================================================================================================\n");
printf("%-5s%-20s%-20s", header[0], header[1], header[2], header[3]);
printf("\n===============================================================================================================\n");
for (i = 0; i < banyakKandidat; i++) {
printf("\n");
printf("%-5d%-20s%-20d", i+1, kandidatPaslon[i], votePaslon[i]);
}
printf ("\n");
printf ("\n\t\t\t !!! PEMENANG : %20s !!!\n", strupr(kandidatPaslon[0]));
printf ("\n");
}
//Deletes the input entry for one region
void hapusData () {
int i, hapus, pilihan;
printf("\n\nNumber of the region entry to delete : ");
scanf("%d", &hapus);
pilihan = hapus-1;
if(hapus >= sudahInput + 1) {
printf("\nPenghapusan tidak dapat dilakukan !");
printf("\nTekan (enter) untuk melanjutkan...");
getch();
tabel();
}
else {
jumlahPemilihEligible -= (daerah+pilihan)->jmlhPemilihPerDaerah;
golput -= (daerah+pilihan)->golput;
votePaslon[0] -= (daerah+pilihan)->paslon1;
votePaslon[1] -= (daerah+pilihan)->paslon2;
votePaslon[2] -= (daerah+pilihan)->paslon3;
votePaslon[3] -= (daerah+pilihan)->paslon4;
for(i=hapus-1; i<sudahInput-1; i++ )
{
strcpy((daerah+i)->namaDaerah, (daerah+i+1)->namaDaerah);
(daerah+i)->jmlhPemilihPerDaerah = (daerah+i+1)->jmlhPemilihPerDaerah;
(daerah+i)->golput = (daerah+i+1)->golput;
(daerah+i)->paslon1 = (daerah+i+1)->paslon1;
(daerah+i)->paslon2 = (daerah+i+1)->paslon2;
(daerah+i)->paslon3 = (daerah+i+1)->paslon3;
(daerah+i)->paslon4 = (daerah+i+1)->paslon4;
}
//shrink the counters only after a successful deletion
batas[0]--;
sudahInput--;
}
tabel();
}
//Updates the input entry for one region
void ubahData(){
int ubah, pilihan;
long int a, b, c, d, e, f;
char temp[50];
printf("\n\nNomor daerah yang ingin diubah: ");
scanf("%d", &ubah);
pilihan = ubah-1;
if(ubah >= sudahInput + 1){
printf("\nUbah data tidak dapat dilakukan !");
printf("\nTekan (enter) untuk melanjutkan...");
getch();
tabel();
}else{
printf("Enter the new region name: ");
scanf(" %[^\n]", temp);
printf("Enter the new number of eligible voters: ");
scanf("%ld", &a);
printf("Enter the new number of abstentions: ");
scanf("%ld", &b);
printf("Enter the new valid votes for pair 1: ");
scanf("%ld", &c);
printf("Enter the new valid votes for pair 2: ");
scanf("%ld", &d);
printf("Enter the new valid votes for pair 3: ");
scanf("%ld", &e);
printf("Enter the new valid votes for pair 4: ");
scanf("%ld", &f);
//overwrite only the selected record
strcpy((daerah+pilihan)->namaDaerah, temp);
(daerah+pilihan)->jmlhPemilihPerDaerah = a;
(daerah+pilihan)->golput = b;
(daerah+pilihan)->paslon1 = c;
(daerah+pilihan)->paslon2 = d;
(daerah+pilihan)->paslon3 = e;
(daerah+pilihan)->paslon4 = f;
}
tabel();
}
//Thank-you screen shown when the user finishes the program
void thankYou(){
system("cls");
printf("\n\t\t\t\t|-------------------------------------------------|\n");
printf("\t\t\t\t|| ||\n");
printf("\t\t\t\t|| Terima Kasih Sudah Menggunakan Program Ini! ||\n");
printf("\t\t\t\t|| ||\n");
printf("\t\t\t\t|-------------------------------------------------|\n");
Sleep(500);
exit(0);
}
|
set.h | #ifndef __SET_H__
#define __SET_H__
#include "../tensor/algstrct.h"
#include "functions.h"
//#include <stdint.h>
#include <limits>
#include <inttypes.h>
#include "../shared/memcontrol.h"
#ifdef _OPENMP
#include "omp.h"
#endif
namespace CTF {
/**
* \brief index-value pair used for tensor data input
*/
template<typename dtype=double>
class Pair {
public:
/** \brief key, global index [i1,i2,...] specified as i1+len[0]*i2+... */
int64_t k;
/** \brief tensor value associated with index */
dtype d;
/**
* \brief constructor builds pair
* \param[in] k_ key
* \param[in] d_ value
*/
Pair(int64_t k_, dtype d_){
this->k = k_;
d = d_;
}
/**
* \brief default constructor
*/
Pair(){
//k=0;
//d=0; //(not possible if type has no zero!)
}
/**
* \brief determines pair ordering
*/
bool operator<(Pair<dtype> other) const {
return k<other.k;
}
};
template<typename dtype>
inline bool comp_pair(Pair<dtype> i,
Pair<dtype> j) {
return (i.k<j.k);
}
}
namespace CTF_int {
//does conversion using MKL function if it is available
bool try_mkl_coo_to_csr(int64_t nz, int nrow, char * csr_vs, int * csr_ja, int * csr_ia, char const * coo_vs, int const * coo_rs, int const * coo_cs, int el_size);
bool try_mkl_csr_to_coo(int64_t nz, int nrow, char const * csr_vs, int const * csr_ja, int const * csr_ia, char * coo_vs, int * coo_rs, int * coo_cs, int el_size);
template <typename dtype>
void seq_coo_to_csr(int64_t nz, int nrow, dtype * csr_vs, int * csr_ja, int * csr_ia, dtype const * coo_vs, int const * coo_rs, int const * coo_cs){
int sz = sizeof(dtype);
if (sz == 4 || sz == 8 || sz == 16){
bool b = try_mkl_coo_to_csr(nz, nrow, (char*)csr_vs, csr_ja, csr_ia, (char const*)coo_vs, coo_rs, coo_cs, sz);
if (b) return;
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int64_t i=0; i<nz; i++){
csr_ja[i] = i;
//printf("csr_ja[%d/%d] = %d\n",i,nz,csr_ja[i]);
}
class comp_ref {
public:
int const * a;
comp_ref(int const * a_){ a = a_; }
bool operator()(int u, int v){
return a[u] < a[v];
}
};
comp_ref crc(coo_cs);
std::sort(csr_ja, csr_ja+nz, crc);
comp_ref crr(coo_rs);
std::stable_sort(csr_ja, csr_ja+nz, crr);
// copy with assignment rather than memcpy: the values may be objects, and csr_vs starts out uninitialized
//printf("csr nz = %ld\n",nz);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int64_t i=0; i<nz; i++){
//printf("%d, %d, %ld\n",(int)((char*)(coo_vs+csr_ja[i])-(char*)(coo_vs))-csr_ja[i]*sizeof(dtype),sizeof(dtype),csr_ja[i]);
// memcpy(csr_vs+i, coo_vs+csr_ja[i]-1,sizeof(dtype));
//memcpy(csr_vs+i, coo_vs+csr_ja[i],sizeof(dtype));
csr_vs[i] = coo_vs[csr_ja[i]];
// printf("i %ld csr_ja[i] %d\n", i, csr_ja[i]);
// printf("i %ld v %lf\n", i, csr_vs[i]);
//printf("%p %d\n",coo_vs+i,*(int32_t*)(coo_vs+i));
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int64_t i=0; i<nz; i++){
csr_ja[i] = coo_cs[csr_ja[i]];
}
csr_ia[0] = 1;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i=1; i<nrow+1; i++){
csr_ia[i] = 0;
}
#ifdef _OPENMP
int * scoo_rs = (int*)CTF_int::alloc(sizeof(int)*nz);
memcpy(scoo_rs, coo_rs, nz*sizeof(int));
std::sort(scoo_rs,scoo_rs+nz);
#pragma omp parallel
{
int tid = omp_get_thread_num();
int ntd = omp_get_num_threads();
int64_t i_st = tid*(nz/ntd)+std::min(tid,(int)(nz%ntd));
int64_t i_end = (tid+1)*(nz/ntd)+std::min((tid+1),(int)(nz%ntd));
while (i_st > 0 && i_st < nz && scoo_rs[i_st] == scoo_rs[i_st-1]) i_st++;
while (i_end < nz && scoo_rs[i_end] == scoo_rs[i_end-1]) i_end++;
for (int64_t i=i_st; i<i_end; i++){
csr_ia[scoo_rs[i]]++;
}
}
CTF_int::cdealloc(scoo_rs);
#else
for (int64_t i=0; i<nz; i++){
//printf("scoo_rs[%d]=%d\n",i,scoo_rs[i]);
csr_ia[coo_rs[i]]++;
}
#endif
#ifdef _OPENMP
//int * csr_ia2 = (int*)CTF_int::alloc(sizeof(int)*(nrow+1));
//CTF_int::prefix<int>(nrow+1, csr_ia, csr_ia2);
////memcpy(csr_ia, csr_ia2, nrow*sizeof(int));
//#pragma omp parallel for
//for (int i=0; i<nrow+1; i++){
// assert((i==0 && csr_ia2[i] == 0) || csr_ia[i-1] == csr_ia2[i]);
// csr_ia[i] += csr_ia2[i];
// printf("csr_ia[%d/%d] = %d\n",i,nrow,csr_ia[i]);
//}
//CTF_int::cdealloc(csr_ia2);
CTF_int::parallel_postfix<int>(nrow+1, 1, csr_ia);
#else
for (int i=0; i<nrow; i++){
csr_ia[i+1] += csr_ia[i];
//printf("csr_ia[%d/%d] = %d\n",i,nrow,csr_ia[i]);
}
#endif
}
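// Worked example for seq_coo_to_csr (1-based, MKL-style indexing, as the
// code above assumes): with nz = 3, nrow = 2 and COO input
//   coo_rs = {2, 1, 2}, coo_cs = {1, 3, 2}, coo_vs = {20., 10., 30.},
// the entries are ordered by row and stably by column within each row,
// giving
//   csr_vs = {10., 20., 30.}, csr_ja = {3, 1, 2}, csr_ia = {1, 2, 4},
// so row i holds entries csr_ia[i-1]-1 .. csr_ia[i]-2 of csr_vs/csr_ja.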
template <typename dtype>
void seq_coo_to_ccsr(int64_t nz, int64_t nnz_row, dtype * ccsr_vs, int * ccsr_ja, int * ccsr_ia, dtype const * coo_vs, int64_t const * coo_rs, int64_t const * coo_cs){
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int64_t i=0; i<nz; i++){
ccsr_ja[i] = i;
//printf("ccsr_ja[%d/%d] = %d\n",i,nz,ccsr_ja[i]);
}
class comp_ref {
public:
int64_t const * a;
comp_ref(int64_t const * a_){ a = a_; }
bool operator()(int u, int v){
return a[u] < a[v];
}
};
comp_ref crc(coo_cs);
std::sort(ccsr_ja, ccsr_ja+nz, crc);
comp_ref crr(coo_rs);
std::stable_sort(ccsr_ja, ccsr_ja+nz, crr);
// copy with assignment rather than memcpy: the values may be objects, and ccsr_vs starts out uninitialized
//printf("ccsr nz = %ld\n",nz);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int64_t i=0; i<nz; i++){
//printf("%d, %d, %ld\n",(int)((char*)(coo_vs+ccsr_ja[i])-(char*)(coo_vs))-ccsr_ja[i]*sizeof(dtype),sizeof(dtype),ccsr_ja[i]);
// memcpy(ccsr_vs+i, coo_vs+ccsr_ja[i]-1,sizeof(dtype));
//memcpy(ccsr_vs+i, coo_vs+ccsr_ja[i],sizeof(dtype));
ccsr_vs[i] = coo_vs[ccsr_ja[i]];
// printf("i %ld ccsr_ja[i] %d\n", i, ccsr_ja[i]);
// printf("i %ld v %lf\n", i, ccsr_vs[i]);
//printf("%p %d\n",coo_vs+i,*(int32_t*)(coo_vs+i));
}
ccsr_ia[0] = 1;
ccsr_ia[1] = 1 + (nz>0);
//FIXME: parallelize
int64_t cia = 1;
for (int64_t i=1; i<nz; i++){
if (coo_rs[ccsr_ja[i]] > coo_rs[ccsr_ja[i-1]]){
cia++;
ccsr_ia[cia] = ccsr_ia[cia-1];
}
ccsr_ia[cia]++;
}
//#ifdef _OPENMP
// #pragma omp parallel for
//#endif
// for (int i=0; i<nnz_row; i++){
// ccsr_ia[i+1] += ccsr_ia[i];
// //printf("ccsr_ia[%d/%d] = %d\n",i,nrow,ccsr_ia[i]);
// }
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int64_t i=0; i<nz; i++){
ccsr_ja[i] = coo_cs[ccsr_ja[i]];
}
}
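// Note (comparison with the CSR routines above): the ccsr form keeps one
// ccsr_ia entry per row that has nonzeros (nnz_row of them) instead of one
// per matrix row; a separate row_enc array, consumed by seq_ccsr_to_coo
// below, maps each such row back to its original row index.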
template <typename dtype>
void seq_csr_to_coo(int64_t nz, int nrow, dtype const * csr_vs, int const * csr_ja, int const * csr_ia, dtype * coo_vs, int * coo_rs, int * coo_cs){
int sz = sizeof(dtype);
if (sz == 4 || sz == 8 || sz == 16){
bool b = try_mkl_csr_to_coo(nz, nrow, (char const*)csr_vs, csr_ja, csr_ia, (char*)coo_vs, coo_rs, coo_cs, sz);
if (b) return;
}
//memcpy(coo_vs, csr_vs, sizeof(dtype)*nz);
std::copy(csr_vs, csr_vs+nz, coo_vs);
memcpy(coo_cs, csr_ja, sizeof(int)*nz);
for (int i=0; i<nrow; i++){
std::fill(coo_rs+csr_ia[i]-1, coo_rs+csr_ia[i+1]-1, i+1);
}
}
template <typename dtype>
void def_coo_to_ccsr(int64_t nz, int64_t nnz_row, dtype * ccsr_vs, int * ccsr_ja, int * ccsr_ia, dtype const * coo_vs, int64_t const * coo_rs, int64_t const * coo_cs){
seq_coo_to_ccsr<dtype>(nz, nnz_row, ccsr_vs, ccsr_ja, ccsr_ia, coo_vs, coo_rs, coo_cs);
}
template <typename dtype>
void def_coo_to_csr(int64_t nz, int nrow, dtype * csr_vs, int * csr_ja, int * csr_ia, dtype const * coo_vs, int const * coo_rs, int const * coo_cs){
seq_coo_to_csr<dtype>(nz, nrow, csr_vs, csr_ja, csr_ia, coo_vs, coo_rs, coo_cs);
}
template <typename dtype>
void def_csr_to_coo(int64_t nz, int nrow, dtype const * csr_vs, int const * csr_ja, int const * csr_ia, dtype * coo_vs, int * coo_rs, int * coo_cs){
seq_csr_to_coo<dtype>(nz, nrow, csr_vs, csr_ja, csr_ia, coo_vs, coo_rs, coo_cs);
}
template <typename dtype>
void seq_ccsr_to_coo(int64_t nz, int64_t nnz_row, dtype const * ccsr_vs, int const * ccsr_ja, int const * ccsr_ia, int64_t const * row_enc, dtype * coo_vs, int64_t * coo_rs, int64_t * coo_cs){
//memcpy(coo_vs, ccsr_vs, sizeof(dtype)*nz);
std::copy(ccsr_vs, ccsr_vs+nz, coo_vs);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int64_t i=0; i<nz; i++){
coo_cs[i] = ccsr_ja[i];
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int64_t i=0; i<nnz_row; i++){
std::fill(coo_rs+ccsr_ia[i]-1, coo_rs+ccsr_ia[i+1]-1, row_enc[i]);
}
}
template <typename dtype>
void def_coo_to_ccsr(int64_t nz, int64_t nnz_row, dtype * ccsr_vs, int * ccsr_ja, int * ccsr_ia, int const * row_enc, dtype const * coo_vs, int64_t const * coo_rs, int64_t const * coo_cs){
seq_coo_to_ccsr<dtype>(nz, nnz_row, ccsr_vs, ccsr_ja, ccsr_ia, row_enc, coo_vs, coo_rs, coo_cs);
}
template <typename dtype>
void def_ccsr_to_coo(int64_t nz, int64_t nnz_row, dtype const * ccsr_vs, int const * ccsr_ja, int const * ccsr_ia, int64_t const * row_enc, dtype * coo_vs, int64_t * coo_rs, int64_t * coo_cs){
seq_ccsr_to_coo<dtype>(nz, nnz_row, ccsr_vs, ccsr_ja, ccsr_ia, row_enc, coo_vs, coo_rs, coo_cs);
}
template <typename dtype>
bool default_isequal(dtype a, dtype b){
int sz = sizeof(dtype);
for (int i=0; i<sz; i++){
if (((char const *)&a)[i] != ((char const *)&b)[i]){
return false;
}
}
return true;
}
template <typename dtype>
dtype default_addinv(dtype a){
return -a;
}
template <typename dtype, bool is_ord>
inline typename std::enable_if<is_ord, dtype>::type
default_abs(dtype a){
dtype b = default_addinv<dtype>(a);
return a>=b ? a : b;
}
template <typename dtype, bool is_ord>
inline typename std::enable_if<!is_ord, dtype>::type
default_abs(dtype a){
printf("CTF ERROR: cannot compute abs unless the set is ordered");
assert(0);
return a;
}
template <typename dtype, dtype (*abs)(dtype)>
void char_abs(char const * a,
char * b){
((dtype*)b)[0]=abs(((dtype const*)a)[0]);
}
//C++11 std::enable_if (from <type_traits>) is needed for these overloads
template <typename dtype, bool is_ord>
inline typename std::enable_if<is_ord, dtype>::type
default_min(dtype a, dtype b){
return a>b ? b : a;
}
template <typename dtype, bool is_ord>
inline typename std::enable_if<!is_ord, dtype>::type
default_min(dtype a, dtype b){
printf("CTF ERROR: cannot compute a max unless the set is ordered");
assert(0);
return a;
}
template <typename dtype, bool is_ord>
inline typename std::enable_if<is_ord, dtype>::type
default_max_lim(){
return std::numeric_limits<dtype>::max();
}
template <typename dtype, bool is_ord>
inline typename std::enable_if<!is_ord, dtype>::type
default_max_lim(){
printf("CTF ERROR: cannot compute a max unless the set is ordered");
assert(0);
dtype * a = NULL; // unreachable after assert; silences missing-return warning
return *a;
}
template <typename dtype, bool is_ord>
inline typename std::enable_if<is_ord, dtype>::type
default_min_lim(){
return std::numeric_limits<dtype>::min();
}
template <typename dtype, bool is_ord>
inline typename std::enable_if<!is_ord, dtype>::type
default_min_lim(){
printf("CTF ERROR: cannot compute a max unless the set is ordered");
assert(0);
dtype * a = NULL; // unreachable after assert; silences missing-return warning
return *a;
}
template <typename dtype, bool is_ord>
inline typename std::enable_if<is_ord, dtype>::type
default_max(dtype a, dtype b){
return b>a ? b : a;
}
template <typename dtype, bool is_ord>
inline typename std::enable_if<!is_ord, dtype>::type
default_max(dtype a, dtype b){
printf("CTF ERROR: cannot compute a min unless the set is ordered");
assert(0);
return a;
}
template <typename dtype>
MPI_Datatype get_default_mdtype(bool & is_custom){
MPI_Datatype newtype;
MPI_Type_contiguous(sizeof(dtype), MPI_BYTE, &newtype);
MPI_Type_commit(&newtype);
is_custom = true;
return newtype;
}
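// Example usage ('MyPOD' is a hypothetical user-defined trivially-copyable
// type): a contiguous MPI byte type is committed for it, and the caller is
// responsible for freeing it once is_custom comes back true:
//   bool is_custom;
//   MPI_Datatype t = get_default_mdtype<MyPOD>(is_custom);
//   // ... use t in MPI communication ...
//   if (is_custom) MPI_Type_free(&t);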
extern MPI_Datatype MPI_CTF_BOOL;
extern MPI_Datatype MPI_CTF_DOUBLE_COMPLEX;
extern MPI_Datatype MPI_CTF_LONG_DOUBLE_COMPLEX;
template <>
inline MPI_Datatype get_default_mdtype<bool>(bool & is_custom){ is_custom=false; return MPI_CTF_BOOL; }
template <>
inline MPI_Datatype get_default_mdtype< std::complex<double> >(bool & is_custom){ is_custom=false; return MPI_CTF_DOUBLE_COMPLEX; }
template <>
inline MPI_Datatype get_default_mdtype< std::complex<long double> >(bool & is_custom){ is_custom=false; return MPI_CTF_LONG_DOUBLE_COMPLEX; }
template <>
inline MPI_Datatype get_default_mdtype<char>(bool & is_custom){ is_custom=false; return MPI_CHAR; }
template <>
inline MPI_Datatype get_default_mdtype<int>(bool & is_custom){ is_custom=false; return MPI_INT; }
template <>
inline MPI_Datatype get_default_mdtype<int64_t>(bool & is_custom){ is_custom=false; return MPI_INT64_T; }
template <>
inline MPI_Datatype get_default_mdtype<unsigned int>(bool & is_custom){ is_custom=false; return MPI_UNSIGNED; }
template <>
inline MPI_Datatype get_default_mdtype<uint64_t>(bool & is_custom){ is_custom=false; return MPI_UINT64_T; }
template <>
inline MPI_Datatype get_default_mdtype<float>(bool & is_custom){ is_custom=false; return MPI_FLOAT; }
template <>
inline MPI_Datatype get_default_mdtype<double>(bool & is_custom){ is_custom=false; return MPI_DOUBLE; }
template <>
inline MPI_Datatype get_default_mdtype<long double>(bool & is_custom){ is_custom=false; return MPI_LONG_DOUBLE; }
template <>
inline MPI_Datatype get_default_mdtype< std::complex<float> >(bool & is_custom){ is_custom=false; return MPI_COMPLEX; }
template <typename dtype>
constexpr bool get_default_is_ord(){
return false;
}
#define INST_ORD_TYPE(dtype) \
template <> \
constexpr bool get_default_is_ord<dtype>(){ \
return true; \
}
INST_ORD_TYPE(float)
INST_ORD_TYPE(double)
INST_ORD_TYPE(long double)
INST_ORD_TYPE(bool)
INST_ORD_TYPE(char)
INST_ORD_TYPE(int)
INST_ORD_TYPE(unsigned int)
INST_ORD_TYPE(int64_t)
INST_ORD_TYPE(uint64_t)
#define INST_IET(typ) \
template <> \
inline bool default_isequal<typ>(typ a, typ b){ \
return a==b; \
}
INST_IET(float)
INST_IET(double)
INST_IET(std::complex<float>)
INST_IET(std::complex<double>)
INST_IET(bool)
INST_IET(int)
INST_IET(int16_t)
INST_IET(int64_t)
INST_IET(uint16_t)
INST_IET(uint32_t)
INST_IET(uint64_t)
INST_IET(std::complex<long double>)
INST_IET(long double)
}
namespace CTF {
/** \brief pair for sorting */
template <typename dtype>
struct dtypePair{
int64_t key;
dtype data;
bool operator < (const dtypePair<dtype>& other) const {
return (key < other.key);
}
};
/**
* \defgroup algstrct Algebraic Structures
* \addtogroup algstrct
* @{
*/
/**
* \brief Set class defined by a datatype and min/max functions (if the type is ordered, i.e. is_ord=true);
* currently assumes min and max are given by std::numeric_limits (custom min/max are not allowed)
*/
template <typename dtype=double, bool is_ord=CTF_int::get_default_is_ord<dtype>()>
class Set : public CTF_int::algstrct {
public:
int pair_sz;
bool is_custom_mdtype;
MPI_Datatype tmdtype;
~Set(){
if (is_custom_mdtype) MPI_Type_free(&tmdtype);
}
Set(Set const & other) : CTF_int::algstrct(other) {
if (other.is_custom_mdtype){
tmdtype = CTF_int::get_default_mdtype<dtype>(is_custom_mdtype);
} else {
this->tmdtype = other.tmdtype;
is_custom_mdtype = false;
}
pair_sz = sizeof(std::pair<int64_t,dtype>);
//printf("%ld %ld \n", sizeof(dtype), pair_sz);
abs = other.abs;
}
int pair_size() const {
//printf("%d %d \n", sizeof(dtype), pair_sz);
return pair_sz;
}
int64_t get_key(char const * a) const {
return ((std::pair<int64_t,dtype> const *)a)->first;
}
char * get_value(char * a) const {
return (char*)&(((std::pair<int64_t,dtype> const *)a)->second);
}
char const * get_const_value(char const * a) const {
return (char const *)&(((std::pair<int64_t,dtype> const *)a)->second);
}
virtual CTF_int::algstrct * clone() const {
return new Set<dtype, is_ord>(*this);
}
bool is_ordered() const { return is_ord; }
Set() : CTF_int::algstrct(sizeof(dtype)){
tmdtype = CTF_int::get_default_mdtype<dtype>(is_custom_mdtype);
set_abs_to_default();
pair_sz = sizeof(std::pair<int64_t,dtype>);
}
void set_abs_to_default(){
abs = &CTF_int::char_abs< dtype, CTF_int::default_abs<dtype, is_ord> >;
}
MPI_Datatype mdtype() const {
return tmdtype;
}
void min(char const * a,
char const * b,
char * c) const {
((dtype*)c)[0] = CTF_int::default_min<dtype,is_ord>(((dtype*)a)[0],((dtype*)b)[0]);
}
void max(char const * a,
char const * b,
char * c) const {
((dtype*)c)[0] = CTF_int::default_max<dtype,is_ord>(((dtype*)a)[0],((dtype*)b)[0]);
}
void min(char * c) const {
((dtype*)c)[0] = CTF_int::default_min_lim<dtype,is_ord>();
}
void max(char * c) const {
((dtype*)c)[0] = CTF_int::default_max_lim<dtype,is_ord>();
}
void cast_double(double d, char * c) const {
//((dtype*)c)[0] = (dtype)d;
printf("CTF ERROR: double cast not possible for this algebraic structure\n");
assert(0);
}
void cast_int(int64_t i, char * c) const {
//((dtype*)c)[0] = (dtype)i;
printf("CTF ERROR: integer cast not possible for this algebraic structure\n");
assert(0);
}
double cast_to_double(char const * c) const {
printf("CTF ERROR: double cast not possible for this algebraic structure\n");
IASSERT(0);
assert(0);
return 0.0;
}
int64_t cast_to_int(char const * c) const {
printf("CTF ERROR: int cast not possible for this algebraic structure\n");
assert(0);
return 0;
}
void print(char const * a, FILE * fp=stdout) const {
for (int i=0; i<el_size; i++){
fprintf(fp,"%x",a[i]);
}
}
bool isequal(char const * a, char const * b) const {
if (a == NULL && b == NULL) return true;
if (a == NULL || b == NULL) return false;
for (int i=0; i<el_size; i++){
if (a[i] != b[i]) return false;
}
return true;
}
void coo_to_csr(int64_t nz, int nrow, char * csr_vs, int * csr_ja, int * csr_ia, char const * coo_vs, int const * coo_rs, int const * coo_cs) const {
CTF_int::def_coo_to_csr(nz, nrow, (dtype *)csr_vs, csr_ja, csr_ia, (dtype const *) coo_vs, coo_rs, coo_cs);
}
void csr_to_coo(int64_t nz, int nrow, char const * csr_vs, int const * csr_ja, int const * csr_ia, char * coo_vs, int * coo_rs, int * coo_cs) const {
CTF_int::def_csr_to_coo(nz, nrow, (dtype const *)csr_vs, csr_ja, csr_ia, (dtype*) coo_vs, coo_rs, coo_cs);
}
void coo_to_ccsr(int64_t nz, int64_t nnz_row, char * ccsr_vs, int * ccsr_ja, int * ccsr_ia, char const * coo_vs, int64_t const * coo_rs, int64_t const * coo_cs) const {
CTF_int::def_coo_to_ccsr(nz, nnz_row, (dtype *)ccsr_vs, ccsr_ja, ccsr_ia, (dtype const *) coo_vs, coo_rs, coo_cs);
}
void ccsr_to_coo(int64_t nz, int64_t nnz_row, char const * csr_vs, int const * csr_ja, int const * csr_ia, int64_t const * row_enc, char * coo_vs, int64_t * coo_rs, int64_t * coo_cs) const {
CTF_int::def_ccsr_to_coo(nz, nnz_row, (dtype const *)csr_vs, csr_ja, csr_ia, row_enc, (dtype*) coo_vs, coo_rs, coo_cs);
}
char * pair_alloc(int64_t n) const {
//assert(sizeof(std::pair<int64_t,dtype>[n])==(uint64_t)(pair_size()*n));
CTF_int::memprof_alloc_pre(n*sizeof(std::pair<int64_t,dtype>));
char * ptr = (char*)(new std::pair<int64_t,dtype>[n]);
CTF_int::memprof_alloc_post(n*sizeof(std::pair<int64_t,dtype>),(void**)&ptr);
return ptr;
}
char * alloc(int64_t n) const {
//assert(sizeof(dtype[n])==(uint64_t)(el_size*n));
CTF_int::memprof_alloc_pre(n*sizeof(dtype));
char * ptr = (char*)(new dtype[n]);
CTF_int::memprof_alloc_post(n*sizeof(dtype),(void**)&ptr);
return ptr;
}
void dealloc(char * ptr) const {
CTF_int::memprof_dealloc(ptr);
return delete [] (dtype*)ptr;
}
void pair_dealloc(char * ptr) const {
CTF_int::memprof_dealloc(ptr);
return delete [] (std::pair<int64_t,dtype>*)ptr;
}
void sort(int64_t n, char * pairs) const {
std::sort((dtypePair<dtype>*)pairs,((dtypePair<dtype>*)pairs)+n);
}
void copy(char * a, char const * b) const {
((dtype *)a)[0] = ((dtype const *)b)[0];
}
void copy(char * a, char const * b, int64_t n) const {
std::copy((dtype const *)b, ((dtype const *)b) + n, (dtype *)a);
}
void copy_pair(char * a, char const * b) const {
((std::pair<int64_t,dtype> *)a)[0] = ((std::pair<int64_t,dtype> const *)b)[0];
}
void copy_pairs(char * a, char const * b, int64_t n) const {
std::copy((std::pair<int64_t,dtype> const *)b, ((std::pair<int64_t,dtype> const *)b) + n, (std::pair<int64_t,dtype> *)a);
//std::copy((std::pair<int64_t,dtype> *)a, (std::pair<int64_t,dtype> const *)b, n);
//for (int64_t i=0; i<n; i++){
/*printf("i=%ld\n",i);
this->print((char*)&(((std::pair<int64_t,dtype> const *)a)[i].second));
this->print((char*)&(((std::pair<int64_t,dtype> const *)b)[i].second));*/
//((std::pair<int64_t,dtype>*)a)[i] = ((std::pair<int64_t,dtype> const *)b)[i];
//this->print((char*)&(((std::pair<int64_t,dtype> const *)a)[i].second));
//}
}
void set(char * a, char const * b, int64_t n) const {
if (n >= 100) {
#ifdef _OPENMP
dtype *ia = (dtype*)a;
dtype ib = *((dtype*)b);
#pragma omp parallel
{
int64_t tid = omp_get_thread_num();
int64_t chunksize = n / omp_get_num_threads();
dtype *begin = ia + chunksize * tid;
dtype *end;
if (tid == omp_get_num_threads() - 1)
end = ia + n;
else
end = begin + chunksize;
std::fill(begin, end, ib);
}
return;
#endif
}
std::fill((dtype*)a, ((dtype*)a)+n, *((dtype*)b));
}
void set_pair(char * a, int64_t key, char const * b) const {
((std::pair<int64_t,dtype> *)a)[0] = std::pair<int64_t,dtype>(key,*((dtype*)b));
}
void set_pairs(char * a, char const * b, int64_t n) const {
std::fill((std::pair<int64_t,dtype> *)a, (std::pair<int64_t,dtype> *)a + n, *(std::pair<int64_t,dtype> const*)b);
}
void copy(int64_t n, char const * a, int inc_a, char * b, int inc_b) const {
dtype const * da = (dtype const*)a;
dtype * db = (dtype *)b;
for (int64_t i=0; i<n; i++){
db[inc_b*i] = da[inc_a*i];
}
}
void copy(int64_t m,
int64_t n,
char const * a,
int64_t lda_a,
char * b,
int64_t lda_b) const {
dtype const * da = (dtype const*)a;
dtype * db = (dtype *)b;
for (int64_t j=0; j<n; j++){
for (int64_t i=0; i<m; i++){
db[j*lda_b+i] = da[j*lda_a+i];
}
}
}
void init(int64_t n, char * arr) const {
dtype addid = dtype();
set(arr, (char const *)&addid, n);
}
/** \brief initialize n objects to zero
* \param[in] n number of items
* \param[in] arr array containing n items, to be set to zero
*/
virtual void init_shell(int64_t n, char * arr) const {
dtype dummy = dtype();
for (int64_t i=0; i<n; i++){
memcpy(arr+i*el_size,(char*)&dummy,el_size);
}
}
CTF_int::bivar_function * get_elementwise_smaller() const {
return new Bivar_Function<dtype,dtype,bool>([](dtype a, dtype b){ return !CTF_int::default_isequal<dtype>(CTF_int::default_max<dtype,is_ord>(a,b), a);});
}
CTF_int::bivar_function * get_elementwise_smaller_or_equal() const {
return new Bivar_Function<dtype,dtype,bool>([](dtype a, dtype b){ return CTF_int::default_isequal<dtype>(CTF_int::default_max<dtype,is_ord>(a,b), b);});
}
CTF_int::bivar_function * get_elementwise_is_equal() const {
return new Bivar_Function<dtype,dtype,bool>([](dtype a, dtype b){ return CTF_int::default_isequal<dtype>(a, b);});
}
CTF_int::bivar_function * get_elementwise_is_not_equal() const {
return new Bivar_Function<dtype,dtype,bool>([](dtype a, dtype b){ return !CTF_int::default_isequal<dtype>(a, b);});
}
/*
void copy(int64_t m,
int64_t n,
char const * a,
int64_t lda_a,
char const * alpha,
char * b,
int64_t lda_b,
char const * beta) const {
dtype const * da = (dtype const*)a;
dtype dalpha = *((dtype const*)alpha);
dtype dbeta = *((dtype const*)beta);
dtype * db = (dtype *)b;
for (int64_t j=0; j<n; j++){
for (int64_t i=0; i<m; i++){
db[j*lda_b+i] = dbeta*db[j*lda_b+i] + dalpha*da[j*lda_a+i];
}
}
}*/
};
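/**
 * Example (illustrative; 'force' is a hypothetical element type, and MPI must
 * already be initialized, since the constructor commits a custom MPI datatype):
 * \code
 *   struct force { double fx, fy; };
 *   force a = {1.0, 2.0}, b = {1.0, 2.0};
 *   CTF::Set<force> s;            // is_ord defaults to false for unknown types
 *   bool eq = s.isequal((char const *)&a, (char const *)&b); // bytewise compare
 * \endcode
 */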
//FIXME do below with macros to shorten
template <>
inline void Set<float>::cast_double(double d, char * c) const {
((float*)c)[0] = (float)d;
}
template <>
inline void Set<double>::cast_double(double d, char * c) const {
((double*)c)[0] = d;
}
template <>
inline void Set<long double>::cast_double(double d, char * c) const {
((long double*)c)[0] = (long double)d;
}
template <>
inline void Set<int>::cast_double(double d, char * c) const {
((int*)c)[0] = (int)d;
}
template <>
inline void Set<uint64_t>::cast_double(double d, char * c) const {
((uint64_t*)c)[0] = (uint64_t)d;
}
template <>
inline void Set<int64_t>::cast_double(double d, char * c) const {
((int64_t*)c)[0] = (int64_t)d;
}
template <>
inline void Set< std::complex<float>,false >::cast_double(double d, char * c) const {
((std::complex<float>*)c)[0] = (std::complex<float>)d;
}
template <>
inline void Set< std::complex<double>,false >::cast_double(double d, char * c) const {
((std::complex<double>*)c)[0] = (std::complex<double>)d;
}
template <>
inline void Set< std::complex<long double>,false >::cast_double(double d, char * c) const {
((std::complex<long double>*)c)[0] = (std::complex<long double>)d;
}
template <>
inline void Set<float>::cast_int(int64_t d, char * c) const {
((float*)c)[0] = (float)d;
}
template <>
inline void Set<double>::cast_int(int64_t d, char * c) const {
((double*)c)[0] = (double)d;
}
template <>
inline void Set<long double>::cast_int(int64_t d, char * c) const {
((long double*)c)[0] = (long double)d;
}
template <>
inline void Set<int>::cast_int(int64_t d, char * c) const {
((int*)c)[0] = (int)d;
}
template <>
inline void Set<uint64_t>::cast_int(int64_t d, char * c) const {
((uint64_t*)c)[0] = (uint64_t)d;
}
template <>
inline void Set<int64_t>::cast_int(int64_t d, char * c) const {
((int64_t*)c)[0] = (int64_t)d;
}
template <>
inline void Set< std::complex<float>,false >::cast_int(int64_t d, char * c) const {
((std::complex<float>*)c)[0] = (std::complex<float>)d;
}
template <>
inline void Set< std::complex<double>,false >::cast_int(int64_t d, char * c) const {
((std::complex<double>*)c)[0] = (std::complex<double>)d;
}
template <>
inline void Set< std::complex<long double>,false >::cast_int(int64_t d, char * c) const {
((std::complex<long double>*)c)[0] = (std::complex<long double>)d;
}
template <>
inline double Set<float>::cast_to_double(char const * c) const {
return (double)(((float*)c)[0]);
}
template <>
inline double Set<double>::cast_to_double(char const * c) const {
return ((double*)c)[0];
}
template <>
inline double Set<int>::cast_to_double(char const * c) const {
return (double)(((int*)c)[0]);
}
template <>
inline double Set<uint64_t>::cast_to_double(char const * c) const {
return (double)(((uint64_t*)c)[0]);
}
template <>
inline double Set<int64_t>::cast_to_double(char const * c) const {
return (double)(((int64_t*)c)[0]);
}
template <>
inline int64_t Set<int64_t>::cast_to_int(char const * c) const {
return ((int64_t*)c)[0];
}
template <>
inline int64_t Set<int>::cast_to_int(char const * c) const {
return (int64_t)(((int*)c)[0]);
}
template <>
inline int64_t Set<unsigned int>::cast_to_int(char const * c) const {
return (int64_t)(((unsigned int*)c)[0]);
}
template <>
inline int64_t Set<uint64_t>::cast_to_int(char const * c) const {
return (int64_t)(((uint64_t*)c)[0]);
}
template <>
inline int64_t Set<bool>::cast_to_int(char const * c) const {
return (int64_t)(((bool*)c)[0]);
}
template <>
inline void Set<float>::print(char const * a, FILE * fp) const {
fprintf(fp,"%11.5E",((float*)a)[0]);
}
template <>
inline void Set<double>::print(char const * a, FILE * fp) const {
fprintf(fp,"%11.5E",((double*)a)[0]);
}
template <>
inline void Set<int64_t>::print(char const * a, FILE * fp) const {
fprintf(fp,"%ld",((int64_t*)a)[0]);
}
template <>
inline void Set<uint64_t>::print(char const * a, FILE * fp) const {
fprintf(fp,"%lu",((uint64_t*)a)[0]);
}
template <>
inline void Set<uint32_t>::print(char const * a, FILE * fp) const {
fprintf(fp,"%u",((uint32_t*)a)[0]);
}
template <>
inline void Set<int>::print(char const * a, FILE * fp) const {
fprintf(fp,"%d",((int*)a)[0]);
}
template <>
inline void Set< std::complex<float>,false >::print(char const * a, FILE * fp) const {
fprintf(fp,"(%11.5E,%11.5E)",((std::complex<float>*)a)[0].real(),((std::complex<float>*)a)[0].imag());
}
template <>
inline void Set< std::complex<double>,false >::print(char const * a, FILE * fp) const {
fprintf(fp,"(%11.5E,%11.5E)",((std::complex<double>*)a)[0].real(),((std::complex<double>*)a)[0].imag());
}
template <>
inline void Set< std::complex<long double>,false >::print(char const * a, FILE * fp) const {
fprintf(fp,"(%11.5LE,%11.5LE)",((std::complex<long double>*)a)[0].real(),((std::complex<long double>*)a)[0].imag());
}
template <>
inline bool Set<float>::isequal(char const * a, char const * b) const {
if (a == NULL && b == NULL) return true;
if (a == NULL || b == NULL) return false;
return ((float*)a)[0] == ((float*)b)[0];
}
template <>
inline bool Set<double>::isequal(char const * a, char const * b) const {
if (a == NULL && b == NULL) return true;
if (a == NULL || b == NULL) return false;
return ((double*)a)[0] == ((double*)b)[0];
}
template <>
inline bool Set<int>::isequal(char const * a, char const * b) const {
if (a == NULL && b == NULL) return true;
if (a == NULL || b == NULL) return false;
return ((int*)a)[0] == ((int*)b)[0];
}
template <>
inline bool Set<uint64_t>::isequal(char const * a, char const * b) const {
if (a == NULL && b == NULL) return true;
if (a == NULL || b == NULL) return false;
return ((uint64_t*)a)[0] == ((uint64_t*)b)[0];
}
template <>
inline bool Set<int64_t>::isequal(char const * a, char const * b) const {
if (a == NULL && b == NULL) return true;
if (a == NULL || b == NULL) return false;
return ((int64_t*)a)[0] == ((int64_t*)b)[0];
}
template <>
inline bool Set<long double>::isequal(char const * a, char const * b) const {
if (a == NULL && b == NULL) return true;
if (a == NULL || b == NULL) return false;
return ((long double*)a)[0] == ((long double*)b)[0];
}
template <>
inline bool Set< std::complex<float>,false >::isequal(char const * a, char const * b) const {
if (a == NULL && b == NULL) return true;
if (a == NULL || b == NULL) return false;
return (( std::complex<float> *)a)[0] == (( std::complex<float> *)b)[0];
}
template <>
inline bool Set< std::complex<double>,false >::isequal(char const * a, char const * b) const {
if (a == NULL && b == NULL) return true;
if (a == NULL || b == NULL) return false;
return (( std::complex<double> *)a)[0] == (( std::complex<double> *)b)[0];
}
template <>
inline bool Set< std::complex<long double>,false >::isequal(char const * a, char const * b) const {
if (a == NULL && b == NULL) return true;
if (a == NULL || b == NULL) return false;
return (( std::complex<long double> *)a)[0] == (( std::complex<long double> *)b)[0];
}
/**
* @}
*/
}
#include "monoid.h"
#endif
|
inputBug330-1.c | /*
test preprocessing info before and after a statement
*/
int main (int argc,char** argv)
{
int i;
#if defined(_OPENMP)
#pragma omp master
{
i++;
}
#endif
return 0;
}
|
cglobals.h | #ifndef RTGLOBALS
#define RTGLOBALS
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16
#define WARP_SIZE 32
#define Z_ORDER_BLOCK_SIZE 16
#define CMP_RESULTS_BLOCK_SIZE 256
#define HRT_RAY_MISS 0xFFFFFFFE
#define HRT_RAY_HIT 0xFFFFFFFF
#define GMAXVARS 64
#define INVALID_TEXTURE 0xFFFFFFFE
#define TEX_POINT_SAM 1
#define TEX_ALPHASRC_W 2
#define TEX_CLAMP_U 4
#define TEX_CLAMP_V 8
#define TEX_COORD_SECOND 16
#define TEX_COORD_CAM_PROJ 32
// these are related because the data are stored in one int32 variable, triAlphaTest
//
#define ALPHA_MATERIAL_MASK 0x00FFFFFF
#define ALPHA_LIGHTMESH_MASK 0xFF000000
#define ALPHA_LIGHTMESH_SHIFT 24
#define ALPHA_OPACITY_TEX_HAPPEND 0x80000000
#define ALPHA_TRANSPARENCY_HAPPEND 0x40000000
#define TEXMATRIX_ID_MASK 0x00FFFFFF // for texture slots - 'color_texMatrixId', etc.
#define TEXSAMPLER_TYPE_MASK 0xFF000000 // for texture slots - 'color_texMatrixId', etc.
#ifndef M_HALFPI
#define M_HALFPI 1.57079632679489661923f
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846f
#endif
#ifndef M_TWOPI
#define M_TWOPI 6.28318530717958647692f
#endif
#ifndef INV_PI
#define INV_PI 0.31830988618379067154f
#endif
#ifndef INV_TWOPI
#define INV_TWOPI 0.15915494309189533577f
#endif
#ifndef INV_FOURPI
#define INV_FOURPI 0.07957747154594766788f
#endif
#ifndef DEG_TO_RAD
#define DEG_TO_RAD (3.14159265358979323846f / 180.f)
#endif
#define GEPSILON 5e-6f
#define DEPSILON 1e-20f
#define DEPSILON2 1e-30f
#define PEPSILON 0.025f
#define PG_SCALE 1000.0f
/**
* These defines are for the QMC remap table to support different mappings in run time.
* for example you may decide to map (0,1) to screen (x,y) and (2,3) to DOF (x,y) or
* you may decide to map (0,1) to screen (x,y) and (2,3,4) to material sampling
* if no mapping is present in the table (id == -1), then pseudo-random numbers should be used.
*
*/
#define QMC_VAR_SCR_X 0
#define QMC_VAR_SCR_Y 1
#define QMC_VAR_DOF_X 2
#define QMC_VAR_DOF_Y 3
#define QMC_VAR_SRC_A 4
#define QMC_VAR_MAT_L 5
#define QMC_VAR_MAT_0 6
#define QMC_VAR_MAT_1 7
#define QMC_VAR_LGT_N 8
#define QMC_VAR_LGT_0 9
#define QMC_VAR_LGT_1 10
#define QMC_VAR_LGT_2 11
/**
* Note that unlike QMC, MMLT does not use a remap table.
* These offsets are direct offsets into the random vector table (in floats).
*
*/
#define MMLT_HEAD_TOTAL_SIZE 12 //
// [0-3] : LENS; 4 in total
//
#define MMLT_DIM_SCR_X 0
#define MMLT_DIM_SCR_Y 1
#define MMLT_DIM_DOF_X 2
#define MMLT_DIM_DOF_Y 3
// [4-10]: LIGHT; 7 in total
//
#define MMLT_DIM_LGT_X 4
#define MMLT_DIM_LGT_Y 5
#define MMLT_DIM_LGT_Z 6
#define MMLT_DIM_LGT_W 7
#define MMLT_DIM_LGT_X1 8
#define MMLT_DIM_LGT_Y1 9
#define MMLT_DIM_LGT_N 10
// [11] : SPLIT;
//
#define MMLT_DIM_SPLIT 11
#define MMLT_FLOATS_PER_MLAYER 7
#define MMLT_FLOATS_PER_SAMPLE 3
#define MMLT_FLOATS_PER_BOUNCE (MMLT_FLOATS_PER_SAMPLE + MMLT_FLOATS_PER_MLAYER)
#define MMLT_COMPRESSED_F_PERB 6
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#define KMLT_HEAD_SIZE 4
#define KMLT_PER_LIGHT 4
#define KMLT_PER_MATERIAL 6
enum MEGATEX_USAGE{ MEGATEX_SHADING = 1,
MEGATEX_SHADING_HDR = 2,
MEGATEX_NORMAL = 3,
MEGATEX_OPACITY = 4,
};
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef __CUDACC__
#else
#ifdef OCL_COMPILER // OpenCL
#define ALIGN_S(x) __attribute__ ((aligned (x)))
static inline ushort2 make_ushort2(ushort x, ushort y) { ushort2 res; res.x = x; res.y = y; return res; }
static inline int2 make_int2(int a, int b) { int2 res; res.x = a; res.y = b; return res; }
static inline int4 make_int4(int a, int b, int c, int d) { int4 res; res.x = a; res.y = b; res.z = c; res.w = d; return res; }
#define GLOBAL_ID_X get_global_id(0)
#define GLOBAL_ID_Y get_global_id(1)
#define LOCAL_ID_X get_local_id(0)
#define LOCAL_ID_Y get_local_id(1)
#define _PACKED __attribute__ ((packed))
#define __device__
//#define SYNCTHREADS barrier(CLK_LOCAL_MEM_FENCE | CLK_GLOBAL_MEM_FENCE)
#define SYNCTHREADS_LOCAL barrier(CLK_LOCAL_MEM_FENCE)
#define SYNCTHREADS_GLOBAL barrier(CLK_GLOBAL_MEM_FENCE)
static inline float maxcomp(float3 v) { return fmax(v.x, fmax(v.y, v.z)); }
#define NULL 0
static inline ushort4 make_ushort4(ushort a, ushort b, ushort c, ushort d)
{
ushort4 res;
res.x = a;
res.y = b;
res.z = c;
res.w = d;
return res;
}
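// Emulates an atomic float add for OpenCL by spinning on a 32-bit integer
// compare-and-swap: re-read the current value, compute the new sum, and retry
// until no other work-item has modified *source in between.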
static inline void atomic_addf(volatile __global float *source, const float operand)
{
union {
unsigned int intVal;
float floatVal;
} newVal;
union {
unsigned int intVal;
float floatVal;
} prevVal;
do {
prevVal.floatVal = *source;
newVal.floatVal = prevVal.floatVal + operand;
} while (atomic_cmpxchg((volatile global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);
}
static inline float dot3 (const float4 u, const float4 v) { return (u.x*v.x + u.y*v.y + u.z*v.z); }
typedef struct float4x4T
{
float4 m_col[4];
} float4x4;
typedef struct float3x3T
{
float3 row[3];
} float3x3;
static inline float2 make_float2(float a, float b)
{
float2 res;
res.x = a;
res.y = b;
return res;
}
static inline float3 make_float3(float a, float b, float c)
{
float3 res;
res.x = a;
res.y = b;
res.z = c;
return res;
}
static inline float4 make_float4(float a, float b, float c, float d)
{
float4 res;
res.x = a;
res.y = b;
res.z = c;
res.w = d;
return res;
}
static inline float2 to_float2(float4 f4)
{
float2 res;
res.x = f4.x;
res.y = f4.y;
return res;
}
static inline float3 to_float3(float4 f4)
{
float3 res;
res.x = f4.x;
res.y = f4.y;
res.z = f4.z;
return res;
}
static inline float4 to_float4(float3 v, float w)
{
float4 res;
res.x = v.x;
res.y = v.y;
res.z = v.z;
res.w = w;
return res;
}
static inline float3 mul4x3(float4x4 m, float3 v)
{
float3 res;
res.x = v.x * m.m_col[0].x + v.y * m.m_col[1].x + v.z * m.m_col[2].x + m.m_col[3].x;
res.y = v.x * m.m_col[0].y + v.y * m.m_col[1].y + v.z * m.m_col[2].y + m.m_col[3].y;
res.z = v.x * m.m_col[0].z + v.y * m.m_col[1].z + v.z * m.m_col[2].z + m.m_col[3].z;
return res;
}
static inline float3 mul3x3(float4x4 m, float3 v)
{
float3 res;
res.x = v.x * m.m_col[0].x + v.y * m.m_col[1].x + v.z * m.m_col[2].x;
res.y = v.x * m.m_col[0].y + v.y * m.m_col[1].y + v.z * m.m_col[2].y;
res.z = v.x * m.m_col[0].z + v.y * m.m_col[1].z + v.z * m.m_col[2].z;
return res;
}
static inline float2 sincos2f(float a_value)
{
float cosVal;
float sinVal = sincos(a_value, &cosVal);
return make_float2(sinVal, cosVal);
}
#else // Common C++
#ifdef WIN32
#define ALIGN_S(x) __declspec(align(x))
#else
#define ALIGN_S(x) __attribute__ ((aligned (x)))
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846f
#endif
#include "../../HydraAPI/hydra_api/LiteMath.h"
using namespace LiteMath;
#include "../../HydraAPI/hydra_api/HR_HDRImage.h"
typedef HydraRender::HDRImage4f HDRImage4f;
typedef unsigned int uint;
typedef unsigned short ushort;
typedef struct float3x3T
{
float3 row[3];
} float3x3;
static inline float2 sincos2f(float a_value)
{
return make_float2(sin(a_value), cos(a_value));
}
#define __global
#define __constant const
#define __private
#define __read_only
typedef int image1d_t;
typedef int image1d_buffer_t;
typedef int image2d_t;
typedef int sampler_t;
const int CLK_NORMALIZED_COORDS_TRUE = 1;
const int CLK_NORMALIZED_COORDS_FALSE = 2;
const int CLK_ADDRESS_CLAMP = 4;
const int CLK_FILTER_NEAREST = 8;
const int CLK_FILTER_LINEAR = 16;
const int CLK_ADDRESS_REPEAT = 32;
#define COMMON_CPLUS_PLUS_CODE 1
static inline int as_int(float x) { return reinterpret_cast<int&> (x); }
static inline float as_float(int x) { return reinterpret_cast<float&>(x); }
#define _PACKED
typedef unsigned short half;
static inline void vstore_half(float data, size_t offset, __global half *p) { p[offset] = 0; } // CPU-path stub: no real float->half conversion
static inline float sign(float a) { return (a > 0.0f) ? 1.0f : -1.0f; }
static inline int2 make_int2(int a, int b) { int2 res; res.x = a; res.y = b; return res; }
using std::isinf;
#define ENABLE_OPACITY_TEX 1
#define SHADOW_TRACE_COLORED_SHADOWS 1
#define ENABLE_BLINN 1
#include "globals_sys.h"
#endif
#endif
typedef __global const int4* texture2d_t;
#ifndef INFINITY
#define INFINITY (1e38f)
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
typedef struct MatSampleT
{
float3 color;
float3 direction;
float pdf;
int flags;
} MatSample;
enum FLAG_BITS{HRT_COMPUTE_SHADOWS = 1,
HRT_DISABLE_SHADING = 2,
HRT_DIRECT_LIGHT_MODE = 4,
HRT_UNIFIED_IMAGE_SAMPLING = 8,
HRT_PRODUCTION_IMAGE_SAMPLING = 16, // 256 coherent rays per pixel.
HRT_USE_MIS = 32,
HRT_DUMMY2 = 64, // !!!!!!!! DON'T USE THIS FLAG !!!! UNKNOWN BUG: PT DOES NOT CONTRIBUTE TO SCREEN.
HRT_STORE_SUBPIXELS = 128,
HRT_FORWARD_TRACING = 256, /// tracing from light to eye; otherwise from eye to light.
HRT_DRAW_LIGHT_LT = 512,
HRT_3WAY_MIS_WEIGHTS = 1024,
HRT_STORE_RAY_SAMPLES = 8192,
HRT_ENABLE_MMLT = 16384,
HRT_ENABLE_SBPT = 32768,
HRT_INDIRECT_LIGHT_MODE = 65536,
HRT_STUPID_PT_MODE = 65536*8,
HRT_NO_RANDOM_LIGHTS_SELECT = 65536*16,
HRT_DUMMY5 = 65536*32, //
HRT_DUMMY6 = 65536*64, // tracing photons to form a special photon map to speed up direct light sampling
HRT_DUMMY7 = 65536*128,
HRT_DUMMY8 = 65536*256,
HRT_ENABLE_PT_CAUSTICS = 65536*2048,
HRT_USE_BOTH_PHOTON_MAPS = 65536*4096,
HRT_ENABLE_QMC_ONE_SEED = 65536*8192, // !!!!!!!! DON'T MOVE THIS FLAG !!!! See the random generator implementation
HRT_ENABLE_COHERENT_PT = 65536*16384,
};
enum BACK_MODE {MODE_CAM_PROJECTED = 0, MODE_SPHERICAL = 1};
enum VARIABLE_NAMES { // int vars
//
HRT_ENABLE_DOF = 0,
HRT_QMC_VARIANT = 1,
HRT_FIRST_BOUNCE_STORE_CACHE = 2,
HRT_ENABLE_MRAYS_COUNTERS = 3,
HRT_DEBUG_OUTPUT = 4,
HRT_MEASURE_RAYS_TYPE = 5,
HRT_BLACK_DIFFUSE_OFFSET = 6,
HRT_STORE_SHADOW_COLOR_W = 7,
HRT_WHITE_DIFFUSE_OFFSET = 8,
HRT_TRACE_DEPTH = 9,
HRT_PHOTONS_STORE_BOUNCE = 10,
HRT_PHOTONS_GARTHER_BOUNCE = 11,
HRT_RAYS_APPENDBUFFER_SIZE = 12,
HRT_DIFFUSE_TRACE_DEPTH = 13,
HRT_DISPLAY_IC_INTERMEDIATE = 14,
HRT_PT_FILTER_TYPE = 15,
HRT_ENABLE_BAKE = 16,
HRT_SILENT_MODE = 17,
HRT_VAR_ENABLE_RR = 18,
HRT_RENDER_LAYER = 19,
HRT_RENDER_LAYER_DEPTH = 20,
HRT_IC_ENABLED = 21,
HRT_IMAP_ENABLED = 22,
HRT_SPHEREMAP_TEXID0 = 23,
HRT_SPHEREMAP_TEXID1 = 24,
HRT_USE_GAMMA_FOR_ENV = 25,
HRT_HRT_SCENE_HAVE_PORTALS = 26,
HRT_SPHEREMAP_TEXMATRIXID0 = 27,
HRT_SPHEREMAP_TEXMATRIXID1 = 28,
HRT_ENABLE_PATH_REGENERATE = 29,
HRT_ENV_PDF_TABLE_ID = 30,
HRT_MLT_MAX_NUMBERS = 31,
HRT_MLT_ITERS_MULT = 32,
HRT_MMLT_BURN_ITERS = 33,
HRT_MMLT_FIRST_BOUNCE = 34,
HRT_SHADOW_MATTE_BACK = 35,
HRT_MAX_SAMPLES_PER_PIXEL = 36,
HRT_CONTRIB_SAMPLES = 37,
HRT_BOX_MODE_ON = 38,
HRT_KMLT_OR_QMC_LGT_BOUNCES = 39,
HRT_KMLT_OR_QMC_MAT_BOUNCES = 40,
HRT_SHADOW_MATTE_BACK_MODE = 41,
HRT_SHADOW_MATTE_BACK_COLOR_X= 42,
HRT_SHADOW_MATTE_BACK_COLOR_Y= 43,
HRT_SHADOW_MATTE_BACK_COLOR_Z= 44,
};
enum VARIABLE_FLOAT_NAMES{ // float vars
//
HRT_DOF_LENS_RADIUS = 0,
HRT_DOF_FOCAL_PLANE_DIST = 1,
HRT_TILT_ROT_X = 2,
HRT_TRACE_PROCEEDINGS_TRESHOLD = 3,
HRT_TILT_ROT_Y = 4,
HRT_CAUSTIC_POWER_MULT = 5,
HRT_IMAGE_GAMMA = 6,
HRT_TEXINPUT_GAMMA = 7,
HRT_ENV_COLOR_X = 8,
HRT_ENV_COLOR_Y = 9,
HRT_ENV_COLOR_Z = 10,
HRT_ENV_COLOR2_X = 11,
HRT_ENV_COLOR2_Y = 12,
HRT_ENV_COLOR2_Z = 13,
HRT_CAM_FOV = 14,
HRT_PATH_TRACE_ERROR = 15,
HRT_PATH_TRACE_CLAMPING = 16,
HRT_DUMMY_VARIABLE = 17,
HRT_BSPHERE_CENTER_X = 18,
HRT_BSPHERE_CENTER_Y = 19,
HRT_BSPHERE_CENTER_Z = 20,
HRT_BSPHERE_RADIUS = 21,
HRT_GVOXEL_SIZE = 22,
HRT_FOV_X = 23, // viewport parameters
HRT_FOV_Y = 24,
HRT_WIDTH_F = 25,
HRT_HEIGHT_F = 26,
HRT_ABLOW_OFFSET_X = 27,
HRT_ABLOW_OFFSET_Y = 28,
HRT_ABLOW_SCALE_X = 29,
HRT_ABLOW_SCALE_Y = 30,
HRT_MMLT_IMPLICIT_FIXED_PROB = 31,
HRT_MMLT_STEP_SIZE_POWER = 32,
HRT_MMLT_STEP_SIZE_COEFF = 33,
HRT_MLT_SCREEN_SCALE_X = 34,
HRT_MLT_SCREEN_SCALE_Y = 35,
HRT_BACK_TEXINPUT_GAMMA = 36,
};
enum RENDER_LAYER {
LAYER_COLOR = 0,
LAYER_POSITIONS = 1,
LAYER_NORMALS = 2,
LAYER_TEXCOORD = 3,
LAYER_TEXCOLOR_AND_MATERIAL = 4, // material mask
LAYER_INCOMING_PRIMARY = 5, // incoming primary
LAYER_INCOMING_RADIANCE = 6, // incoming secondary
LAYER_COLOR_PRIMARY_AND_REST = 7, // primary + refractions and other bounces
LAYER_COLOR_THE_REST = 8,
LAYER_PRIMARY = 9,
LAYER_SECONDARY = 10 // refractions and other bounces
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
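// ZIndex interleaves the bits of x and y (x on even bit positions, y on odd)
// via a 256-entry Morton lookup table. Example: x=3 (011b), y=5 (101b) gives
// z = 100111b = 39.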
static inline uint ZIndex(ushort x, ushort y, __constant ushort* a_mortonTable256)
{
return (a_mortonTable256[y >> 8] << 17) |
(a_mortonTable256[x >> 8] << 16) |
(a_mortonTable256[y & 0xFF] << 1 ) |
(a_mortonTable256[x & 0xFF] );
}
static inline ushort ExtractFromZIndex3D(uint zIndex, int stride)
{
uint result = 0;
for (int i = 0; i < 10; i++)
{
int bitBask = 1 << (3 * i + stride);
int bit = (bitBask & zIndex) ? 1 : 0;
result |= (bit << i);
}
return (ushort)result;
}
static inline ushort ExtractFromZIndex2D(uint zIndex, int stride)
{
uint result = 0;
for (int i = 0; i < 16; i++)
{
int bitBask = 1 << (2 * i + stride);
int bit = (bitBask & zIndex) ? 1 : 0;
result |= (bit << i);
}
return (ushort)result;
}
static inline ushort ExtractXFromZIndex(uint zIndex)
{
uint result = 0;
for (int i = 0; i<16; i++)
result |= ((1 << (2 * i)) & zIndex) >> i;
return (ushort)result;
}
static inline ushort ExtractYFromZIndex(uint zIndex)
{
uint result = 0;
for (int i = 0; i<16; i++)
result |= ((1 << (2 * i + 1)) & zIndex) >> i;
return (ushort)(result >> 1);
}
static inline int blocks(int elems, int threadsPerBlock)
{
if (elems % threadsPerBlock == 0 && elems >= threadsPerBlock)
return elems / threadsPerBlock;
else
return (elems / threadsPerBlock) + 1;
}
static inline size_t blocksST(size_t elems, int threadsPerBlock)
{
if (elems % threadsPerBlock == 0 && elems >= threadsPerBlock)
return elems / threadsPerBlock;
else
return (elems / threadsPerBlock) + 1;
}
static inline size_t roundBlocks(size_t elems, int threadsPerBlock)
{
if (elems < threadsPerBlock)
return (size_t)threadsPerBlock;
else
return blocksST(elems, threadsPerBlock) * threadsPerBlock;
}
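// Ceil-division helpers for launch-size computation. Example:
// blocks(1000, 256) == 4 and roundBlocks(1000, 256) == 1024, i.e. the element
// count rounded up to a whole number of thread blocks.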
static inline uint Index2D(uint x, uint y, int pitch) { return y*pitch + x; }
static inline uint IndexZBlock2D(int x, int y, int pitch, __constant ushort* a_mortonTable) // window_size[0]
{
uint zOrderX = x % Z_ORDER_BLOCK_SIZE;
uint zOrderY = y % Z_ORDER_BLOCK_SIZE;
uint zIndex = ZIndex(zOrderX, zOrderY, a_mortonTable);
uint wBlocks = pitch / Z_ORDER_BLOCK_SIZE;
uint blockX = x / Z_ORDER_BLOCK_SIZE;
uint blockY = y / Z_ORDER_BLOCK_SIZE;
return (blockX + (blockY)*(wBlocks))*Z_ORDER_BLOCK_SIZE*Z_ORDER_BLOCK_SIZE + zIndex;
}
static inline ushort2 GetXYFromZBlockIndex(uint a_offset, int w, int h)
{
int blocksSizeX = w / Z_ORDER_BLOCK_SIZE;
//int blocksSizeY = h / Z_ORDER_BLOCK_SIZE;
int blockId = a_offset / (Z_ORDER_BLOCK_SIZE*Z_ORDER_BLOCK_SIZE);
int zIdInBlock = a_offset % (Z_ORDER_BLOCK_SIZE*Z_ORDER_BLOCK_SIZE);
int blockY = blockId / blocksSizeX;
int blockX = blockId - blockY*blocksSizeX;
int localX = (int)ExtractXFromZIndex(zIdInBlock);
int localY = (int)ExtractYFromZIndex(zIdInBlock);
ushort2 res;
res.x = (ushort)(blockX*Z_ORDER_BLOCK_SIZE + localX);
res.y = (ushort)(blockY*Z_ORDER_BLOCK_SIZE + localY);
return res;
}
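// SpreadBits distributes the low 10 bits of x so that source bit i lands at
// bit position 3*i (then shifts the result by 'offset'); GetMortonNumber
// below interleaves x, y and z into a 30-bit 3D Morton code.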
static inline uint SpreadBits(int x, int offset)
{
x = (x | (x << 10)) & 0x000F801F;
x = (x | (x << 4)) & 0x00E181C3;
x = (x | (x << 2)) & 0x03248649;
x = (x | (x << 2)) & 0x09249249;
return (uint)(x) << offset;
}
static inline uint GetMortonNumber(int x, int y, int z)
{
return SpreadBits(x, 0) | SpreadBits(y, 1) | SpreadBits(z, 2);
}
static inline float3 reflect(float3 dir, float3 normal)
{
// Do not use this function for "wo" and "wh" microfacets terms.
// They need the formula: 2.0f * dot(wo, wh) * wh - wo;
return normalize((normal * dot(dir, normal) * (-2.0f)) + dir);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
///// a simple tone mapping
static inline float3 ToneMapping(float3 color) { return make_float3(fmin(color.x, 1.0f), fmin(color.y, 1.0f), fmin(color.z, 1.0f)); }
static inline float4 ToneMapping4(float4 color) { return make_float4(fmin(color.x, 1.0f), fmin(color.y, 1.0f), fmin(color.z, 1.0f), fmin(color.w, 1.0f)); }
/////////////////////////////////////////////////////////////////////////////////////////////////////////
////
static inline uint RealColorToUint32_f3(float3 real_color)
{
float r = real_color.x*255.0f;
float g = real_color.y*255.0f;
float b = real_color.z*255.0f;
unsigned char red = (unsigned char)r, green = (unsigned char)g, blue = (unsigned char)b;
return red | (green << 8) | (blue << 16) | 0xFF000000;
}
static inline uint RealColorToUint32(float4 real_color)
{
float r = real_color.x*255.0f;
float g = real_color.y*255.0f;
float b = real_color.z*255.0f;
float a = real_color.w*255.0f;
unsigned char red = (unsigned char)r;
unsigned char green = (unsigned char)g;
unsigned char blue = (unsigned char)b;
unsigned char alpha = (unsigned char)a;
return red | (green << 8) | (blue << 16) | (alpha << 24);
}
static inline float3 SafeInverse(float3 d)
{
const float ooeps = 1.0e-36f; // Avoid div by zero.
float3 res;
res.x = 1.0f / (fabs(d.x) > ooeps ? d.x : copysign(ooeps, d.x));
res.y = 1.0f / (fabs(d.y) > ooeps ? d.y : copysign(ooeps, d.y));
res.z = 1.0f / (fabs(d.z) > ooeps ? d.z : copysign(ooeps, d.z));
return res;
}
static inline float epsilonOfPos(float3 hitPos) { return fmax(fmax(fabs(hitPos.x), fmax(fabs(hitPos.y), fabs(hitPos.z))), 2.0f*GEPSILON)*GEPSILON; }
static inline float misHeuristicPower1(float p) { return isfinite(p) ? fabs(p) : 0.0f; }
static inline float misHeuristicPower2(float p) { return isfinite(p*p) ? p*p : 0.0f; }
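// Balance heuristic for multiple importance sampling (power 1):
// w(a,b) = |a| / (|a| + |b|), clamped against tiny or non-finite pdfs.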
static inline float misWeightHeuristic(float a, float b)
{
const float w = misHeuristicPower1(a) / fmax(misHeuristicPower1(a) + misHeuristicPower1(b), DEPSILON2);
return isfinite(w) ? w : 0.0f;
}
static inline float misWeightHeuristic3(float a, float b, float c)
{
const float w = fabs(a) / fmax(misHeuristicPower1(a) + misHeuristicPower1(b) + misHeuristicPower1(c), DEPSILON2);
if(!isfinite(a))
return 1.0f;
else
return isfinite(w) ? w : 0.0f;
}
/**
\brief offset reflected ray position by epsilon;
\param a_hitPos - world space position on surface
\param a_surfaceNorm - surface normal at a_hitPos
\param a_sampleDir - ray direction in which we are going to trace reflected ray
\return offset ray position
*/
static inline float3 OffsRayPos(const float3 a_hitPos, const float3 a_surfaceNorm, const float3 a_sampleDir)
{
const float signOfNormal2 = dot(a_sampleDir, a_surfaceNorm) < 0.0f ? -1.0f : 1.0f;
const float offsetEps = epsilonOfPos(a_hitPos);
return a_hitPos + signOfNormal2*offsetEps*a_surfaceNorm;
}
/**
\brief offset reflected ray position by epsilon;
\param a_hitPos - world space position on surface
\param a_surfaceNorm - surface normal at a_hitPos
\param a_sampleDir - ray direction in which we are going to trace reflected ray
\param a_shadowOffsAux - per-polygon auxiliary shadow offset.
\return offset ray position
*/
static inline float3 OffsShadowRayPos(const float3 a_hitPos, const float3 a_surfaceNorm, const float3 a_sampleDir, const float a_shadowOffsAux)
{
const float signOfNormal2 = dot(a_sampleDir, a_surfaceNorm) < 0.0f ? -1.0f : 1.0f;
const float offsetEps = epsilonOfPos(a_hitPos);
return a_hitPos + signOfNormal2*(offsetEps + a_shadowOffsAux)*a_surfaceNorm;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
static inline float4x4 make_float4x4(__global const float* a_data)
{
float4x4 matrix;
matrix.m_col[0] = make_float4(a_data[0], a_data[1], a_data[2], a_data[3]);
matrix.m_col[1] = make_float4(a_data[4], a_data[5], a_data[6], a_data[7]);
matrix.m_col[2] = make_float4(a_data[8], a_data[9], a_data[10], a_data[11]);
matrix.m_col[3] = make_float4(a_data[12], a_data[13], a_data[14], a_data[15]);
return matrix;
}
static inline float4x4 make_matrix_rotationX(float a_angle)
{
const float sinx = sin(a_angle);
const float cosx = cos(a_angle);
float4x4 res;
res.m_col[0] = make_float4(1.0f, 0.0f, 0.0f, 0.0f);
res.m_col[1] = make_float4(0.0f, +cosx, +sinx, 0.0f);
res.m_col[2] = make_float4(0.0f, -sinx, +cosx, 0.0f);
res.m_col[3] = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
return res;
}
static inline float4x4 make_matrix_rotationY(float a_angle)
{
const float siny = sin(a_angle);
const float cosy = cos(a_angle);
float4x4 res;
res.m_col[0] = make_float4(+cosy, 0.0f, -siny, 0.0f);
res.m_col[1] = make_float4(0.0f, 1.0f, 0.0f, 0.0f);
res.m_col[2] = make_float4(+siny, 0.0f, +cosy, 0.0f);
res.m_col[3] = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
return res;
}
static inline float4 mul4x4x4(float4x4 m, float4 v)
{
float4 res;
res.x = v.x * m.m_col[0].x + v.y * m.m_col[1].x + v.z * m.m_col[2].x + v.w * m.m_col[3].x;
res.y = v.x * m.m_col[0].y + v.y * m.m_col[1].y + v.z * m.m_col[2].y + v.w * m.m_col[3].y;
res.z = v.x * m.m_col[0].z + v.y * m.m_col[1].z + v.z * m.m_col[2].z + v.w * m.m_col[3].z;
res.w = v.x * m.m_col[0].w + v.y * m.m_col[1].w + v.z * m.m_col[2].w + v.w * m.m_col[3].w;
return res;
}
#ifndef COMMON_CPLUS_PLUS_CODE
static inline float3 mul(float4x4 m, float3 v)
{
float3 res;
res.x = v.x * m.m_col[0].x + v.y * m.m_col[1].x + v.z * m.m_col[2].x + m.m_col[3].x;
res.y = v.x * m.m_col[0].y + v.y * m.m_col[1].y + v.z * m.m_col[2].y + m.m_col[3].y;
res.z = v.x * m.m_col[0].z + v.y * m.m_col[1].z + v.z * m.m_col[2].z + m.m_col[3].z;
return res;
}
#endif
static inline float3x3 make_float3x3(float3 a, float3 b, float3 c)
{
float3x3 m;
m.row[0] = a;
m.row[1] = b;
m.row[2] = c;
return m;
}
static inline float3x3 make_float3x3_by_columns(float3 a, float3 b, float3 c)
{
float3x3 m;
m.row[0].x = a.x;
m.row[1].x = a.y;
m.row[2].x = a.z;
m.row[0].y = b.x;
m.row[1].y = b.y;
m.row[2].y = b.z;
m.row[0].z = c.x;
m.row[1].z = c.y;
m.row[2].z = c.z;
return m;
}
static inline float3 mul3x3x3(float3x3 m, const float3 v)
{
float3 res;
res.x = m.row[0].x*v.x + m.row[0].y*v.y + m.row[0].z*v.z;
res.y = m.row[1].x*v.x + m.row[1].y*v.y + m.row[1].z*v.z;
res.z = m.row[2].x*v.x + m.row[2].y*v.y + m.row[2].z*v.z;
return res;
}
static inline float3x3 mul3x3x3x3(float3x3 m1, float3x3 m2)
{
float3 column1 = mul3x3x3(m1, make_float3(m2.row[0].x, m2.row[1].x, m2.row[2].x));
float3 column2 = mul3x3x3(m1, make_float3(m2.row[0].y, m2.row[1].y, m2.row[2].y));
float3 column3 = mul3x3x3(m1, make_float3(m2.row[0].z, m2.row[1].z, m2.row[2].z));
return make_float3x3_by_columns(column1, column2, column3);
}
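// 3x3 inverse via the adjugate (Cramer's rule). Note: there is no guard
// against a singular matrix; det == 0 produces infinities after the divide.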
static inline float3x3 inverse(float3x3 a)
{
float det = a.row[0].x * (a.row[1].y * a.row[2].z - a.row[1].z * a.row[2].y) -
a.row[0].y * (a.row[1].x * a.row[2].z - a.row[1].z * a.row[2].x) +
a.row[0].z * (a.row[1].x * a.row[2].y - a.row[1].y * a.row[2].x);
float3x3 b;
b.row[0].x = (a.row[1].y * a.row[2].z - a.row[1].z * a.row[2].y);
b.row[0].y = -(a.row[0].y * a.row[2].z - a.row[0].z * a.row[2].y);
b.row[0].z = (a.row[0].y * a.row[1].z - a.row[0].z * a.row[1].y);
b.row[1].x = -(a.row[1].x * a.row[2].z - a.row[1].z * a.row[2].x);
b.row[1].y = (a.row[0].x * a.row[2].z - a.row[0].z * a.row[2].x);
b.row[1].z = -(a.row[0].x * a.row[1].z - a.row[0].z * a.row[1].x);
b.row[2].x = (a.row[1].x * a.row[2].y - a.row[1].y * a.row[2].x);
b.row[2].y = -(a.row[0].x * a.row[2].y - a.row[0].y * a.row[2].x);
b.row[2].z = (a.row[0].x * a.row[1].y - a.row[0].y * a.row[1].x);
float s = 1.0f / det;
b.row[0] *= s;
b.row[1] *= s;
b.row[2] *= s;
return b;
}
#ifndef COMMON_CPLUS_PLUS_CODE
static inline float4x4 inverse4x4(float4x4 m1)
{
float tmp[12]; // temp array for pairs
float4x4 m;
// calculate pairs for first 8 elements (cofactors)
//
tmp[0] = m1.m_col[2].z * m1.m_col[3].w;
tmp[1] = m1.m_col[3].z * m1.m_col[2].w;
tmp[2] = m1.m_col[1].z * m1.m_col[3].w;
tmp[3] = m1.m_col[3].z * m1.m_col[1].w;
tmp[4] = m1.m_col[1].z * m1.m_col[2].w;
tmp[5] = m1.m_col[2].z * m1.m_col[1].w;
tmp[6] = m1.m_col[0].z * m1.m_col[3].w;
tmp[7] = m1.m_col[3].z * m1.m_col[0].w;
tmp[8] = m1.m_col[0].z * m1.m_col[2].w;
tmp[9] = m1.m_col[2].z * m1.m_col[0].w;
tmp[10] = m1.m_col[0].z * m1.m_col[1].w;
tmp[11] = m1.m_col[1].z * m1.m_col[0].w;
// calculate first 8 m1.rowents (cofactors)
//
m.m_col[0].x = tmp[0] * m1.m_col[1].y + tmp[3] * m1.m_col[2].y + tmp[4] * m1.m_col[3].y;
m.m_col[0].x -= tmp[1] * m1.m_col[1].y + tmp[2] * m1.m_col[2].y + tmp[5] * m1.m_col[3].y;
m.m_col[0].y = tmp[1] * m1.m_col[0].y + tmp[6] * m1.m_col[2].y + tmp[9] * m1.m_col[3].y;
m.m_col[0].y -= tmp[0] * m1.m_col[0].y + tmp[7] * m1.m_col[2].y + tmp[8] * m1.m_col[3].y;
m.m_col[0].z = tmp[2] * m1.m_col[0].y + tmp[7] * m1.m_col[1].y + tmp[10] * m1.m_col[3].y;
m.m_col[0].z -= tmp[3] * m1.m_col[0].y + tmp[6] * m1.m_col[1].y + tmp[11] * m1.m_col[3].y;
m.m_col[0].w = tmp[5] * m1.m_col[0].y + tmp[8] * m1.m_col[1].y + tmp[11] * m1.m_col[2].y;
m.m_col[0].w -= tmp[4] * m1.m_col[0].y + tmp[9] * m1.m_col[1].y + tmp[10] * m1.m_col[2].y;
m.m_col[1].x = tmp[1] * m1.m_col[1].x + tmp[2] * m1.m_col[2].x + tmp[5] * m1.m_col[3].x;
m.m_col[1].x -= tmp[0] * m1.m_col[1].x + tmp[3] * m1.m_col[2].x + tmp[4] * m1.m_col[3].x;
m.m_col[1].y = tmp[0] * m1.m_col[0].x + tmp[7] * m1.m_col[2].x + tmp[8] * m1.m_col[3].x;
m.m_col[1].y -= tmp[1] * m1.m_col[0].x + tmp[6] * m1.m_col[2].x + tmp[9] * m1.m_col[3].x;
m.m_col[1].z = tmp[3] * m1.m_col[0].x + tmp[6] * m1.m_col[1].x + tmp[11] * m1.m_col[3].x;
m.m_col[1].z -= tmp[2] * m1.m_col[0].x + tmp[7] * m1.m_col[1].x + tmp[10] * m1.m_col[3].x;
m.m_col[1].w = tmp[4] * m1.m_col[0].x + tmp[9] * m1.m_col[1].x + tmp[10] * m1.m_col[2].x;
m.m_col[1].w -= tmp[5] * m1.m_col[0].x + tmp[8] * m1.m_col[1].x + tmp[11] * m1.m_col[2].x;
// calculate pairs for second 8 m1.rowents (cofactors)
//
tmp[0] = m1.m_col[2].x * m1.m_col[3].y;
tmp[1] = m1.m_col[3].x * m1.m_col[2].y;
tmp[2] = m1.m_col[1].x * m1.m_col[3].y;
tmp[3] = m1.m_col[3].x * m1.m_col[1].y;
tmp[4] = m1.m_col[1].x * m1.m_col[2].y;
tmp[5] = m1.m_col[2].x * m1.m_col[1].y;
tmp[6] = m1.m_col[0].x * m1.m_col[3].y;
tmp[7] = m1.m_col[3].x * m1.m_col[0].y;
tmp[8] = m1.m_col[0].x * m1.m_col[2].y;
tmp[9] = m1.m_col[2].x * m1.m_col[0].y;
tmp[10] = m1.m_col[0].x * m1.m_col[1].y;
tmp[11] = m1.m_col[1].x * m1.m_col[0].y;
// calculate second 8 m1 (cofactors)
//
m.m_col[2].x = tmp[0] * m1.m_col[1].w + tmp[3] * m1.m_col[2].w + tmp[4] * m1.m_col[3].w;
m.m_col[2].x -= tmp[1] * m1.m_col[1].w + tmp[2] * m1.m_col[2].w + tmp[5] * m1.m_col[3].w;
m.m_col[2].y = tmp[1] * m1.m_col[0].w + tmp[6] * m1.m_col[2].w + tmp[9] * m1.m_col[3].w;
m.m_col[2].y -= tmp[0] * m1.m_col[0].w + tmp[7] * m1.m_col[2].w + tmp[8] * m1.m_col[3].w;
m.m_col[2].z = tmp[2] * m1.m_col[0].w + tmp[7] * m1.m_col[1].w + tmp[10] * m1.m_col[3].w;
m.m_col[2].z -= tmp[3] * m1.m_col[0].w + tmp[6] * m1.m_col[1].w + tmp[11] * m1.m_col[3].w;
m.m_col[2].w = tmp[5] * m1.m_col[0].w + tmp[8] * m1.m_col[1].w + tmp[11] * m1.m_col[2].w;
m.m_col[2].w -= tmp[4] * m1.m_col[0].w + tmp[9] * m1.m_col[1].w + tmp[10] * m1.m_col[2].w;
m.m_col[3].x = tmp[2] * m1.m_col[2].z + tmp[5] * m1.m_col[3].z + tmp[1] * m1.m_col[1].z;
m.m_col[3].x -= tmp[4] * m1.m_col[3].z + tmp[0] * m1.m_col[1].z + tmp[3] * m1.m_col[2].z;
m.m_col[3].y = tmp[8] * m1.m_col[3].z + tmp[0] * m1.m_col[0].z + tmp[7] * m1.m_col[2].z;
m.m_col[3].y -= tmp[6] * m1.m_col[2].z + tmp[9] * m1.m_col[3].z + tmp[1] * m1.m_col[0].z;
m.m_col[3].z = tmp[6] * m1.m_col[1].z + tmp[11] * m1.m_col[3].z + tmp[3] * m1.m_col[0].z;
m.m_col[3].z -= tmp[10] * m1.m_col[3].z + tmp[2] * m1.m_col[0].z + tmp[7] * m1.m_col[1].z;
m.m_col[3].w = tmp[10] * m1.m_col[2].z + tmp[4] * m1.m_col[0].z + tmp[9] * m1.m_col[1].z;
m.m_col[3].w -= tmp[8] * m1.m_col[1].z + tmp[11] * m1.m_col[2].z + tmp[5] * m1.m_col[0].z;
// calculate matrix inverse
//
const float k = 1.0f / (m1.m_col[0].x * m.m_col[0].x + m1.m_col[1].x * m.m_col[0].y +
m1.m_col[2].x * m.m_col[0].z + m1.m_col[3].x * m.m_col[0].w);
const float4 vK = make_float4(k,k,k,k);
m.m_col[0] = m.m_col[0]*vK;
m.m_col[1] = m.m_col[1]*vK;
m.m_col[2] = m.m_col[2]*vK;
m.m_col[3] = m.m_col[3]*vK;
return m;
}
// Look At matrix creation
// return the inverse view matrix
//
static inline float4x4 lookAt(float3 eye, float3 center, float3 up)
{
float3 x, y, z; // basis; will make a rotation matrix
z.x = eye.x - center.x;
z.y = eye.y - center.y;
z.z = eye.z - center.z;
z = normalize(z);
y.x = up.x;
y.y = up.y;
y.z = up.z;
x = cross(y, z); // X vector = Y cross Z
y = cross(z, x); // Recompute Y = Z cross X
// cross product gives area of parallelogram, which is < 1.0 for
// non-perpendicular unit-length vectors; so normalize x, y here
x = normalize(x);
y = normalize(y);
float4x4 M;
M.m_col[0] = make_float4(x.x, y.x, z.x, 0.0f);
M.m_col[1] = make_float4(x.y, y.y, z.y, 0.0f);
M.m_col[2] = make_float4(x.z, y.z, z.z, 0.0f);
M.m_col[3] = make_float4(-x.x * eye.x - x.y * eye.y - x.z*eye.z,
-y.x * eye.x - y.y * eye.y - y.z*eye.z,
-z.x * eye.x - z.y * eye.y - z.z*eye.z,
1.0f );
return M;
}
static inline float4x4 transpose(const float4x4 a_mat)
{
float4x4 res;
res.m_col[0] = make_float4(a_mat.m_col[0].x, a_mat.m_col[1].x, a_mat.m_col[2].x, a_mat.m_col[3].x);
res.m_col[1] = make_float4(a_mat.m_col[0].y, a_mat.m_col[1].y, a_mat.m_col[2].y, a_mat.m_col[3].y);
res.m_col[2] = make_float4(a_mat.m_col[0].z, a_mat.m_col[1].z, a_mat.m_col[2].z, a_mat.m_col[3].z);
res.m_col[3] = make_float4(a_mat.m_col[0].w, a_mat.m_col[1].w, a_mat.m_col[2].w, a_mat.m_col[3].w);
return res;
}
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
static inline float3 EyeRayDir(float x, float y, float w, float h, float4x4 a_mViewProjInv) // g_mViewProjInv
{
float4 pos = make_float4( 2.0f * (x + 0.5f) / w - 1.0f,
-2.0f * (y + 0.5f) / h + 1.0f,
0.0f,
1.0f );
pos = mul4x4x4(a_mViewProjInv, pos);
pos /= pos.w;
pos.y *= (-1.0f);
return normalize(to_float3(pos));
}
static inline void matrix4x4f_mult_ray3(float4x4 a_mWorldViewInv, __private float3* ray_pos, __private float3* ray_dir) // g_mWorldViewInv
{
float3 pos = mul(a_mWorldViewInv, (*ray_pos));
float3 pos2 = mul(a_mWorldViewInv, ((*ray_pos) + 100.0f*(*ray_dir)));
float3 diff = pos2 - pos;
(*ray_pos) = pos;
(*ray_dir) = normalize(diff);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
////
static inline float3 matrix3x3f_mult_float3(__global const float* M, float3 v)
{
float3 res;
res.x = M[0 * 3 + 0] * v.x + M[0 * 3 + 1] * v.y + M[0 * 3 + 2] * v.z;
res.y = M[1 * 3 + 0] * v.x + M[1 * 3 + 1] * v.y + M[1 * 3 + 2] * v.z;
res.z = M[2 * 3 + 0] * v.x + M[2 * 3 + 1] * v.y + M[2 * 3 + 2] * v.z;
return res;
}
static inline float3x3 RotateAroundVector3x3(float3 v, float rotAngle)
{
const float cos_t = cos(rotAngle);
const float sin_t = sin(rotAngle);
float3x3 m;
m.row[0].x = (1.0f - cos_t)*v.x*v.x + cos_t;
m.row[0].y = (1.0f - cos_t)*v.x*v.y - sin_t*v.z;
m.row[0].z = (1.0f - cos_t)*v.x*v.z + sin_t*v.y;
m.row[1].x = (1.0f - cos_t)*v.y*v.x + sin_t*v.z;
m.row[1].y = (1.0f - cos_t)*v.y*v.y + cos_t;
m.row[1].z = (1.0f - cos_t)*v.y*v.z - sin_t*v.x;
m.row[2].x = (1.0f - cos_t)*v.x*v.z - sin_t*v.y;
m.row[2].y = (1.0f - cos_t)*v.z*v.y + sin_t*v.x;
m.row[2].z = (1.0f - cos_t)*v.z*v.z + cos_t;
return m;
}
static inline float4x4 RotateAroundVector4x4(float3 v, float rotAngle)
{
const float cos_t = cos(rotAngle);
const float sin_t = sin(rotAngle);
float4x4 m;
m.m_col[0].x = (1.0f - cos_t)*v.x*v.x + cos_t;
m.m_col[1].x = (1.0f - cos_t)*v.x*v.y - sin_t*v.z;
m.m_col[2].x = (1.0f - cos_t)*v.x*v.z + sin_t*v.y;
m.m_col[3].x = 1.0f;
m.m_col[0].y = (1.0f - cos_t)*v.y*v.x + sin_t*v.z;
m.m_col[1].y = (1.0f - cos_t)*v.y*v.y + cos_t;
m.m_col[2].y = (1.0f - cos_t)*v.y*v.z - sin_t*v.x;
m.m_col[3].y = 1.0f;
m.m_col[0].z = (1.0f - cos_t)*v.x*v.z - sin_t*v.y;
m.m_col[1].z = (1.0f - cos_t)*v.z*v.y + sin_t*v.x;
m.m_col[2].z = (1.0f - cos_t)*v.z*v.z + cos_t;
m.m_col[3].z = 1.0f;
m.m_col[0].w = 0.0f;
m.m_col[1].w = 0.0f;
m.m_col[2].w = 0.0f;
m.m_col[3].w = 1.0f;
return m;
}
static inline float DistanceSquared(float3 a, float3 b)
{
float3 diff = b - a;
return dot(diff, diff);
}
static inline float UniformConePdf(float cosThetaMax) { return 1.0f / (2.0f * M_PI * (1.0f - cosThetaMax)); }
static inline float3 UniformSampleSphere(float u1, float u2)
{
float z = 1.0f - 2.0f * u1;
float r = sqrt(fmax(0.0f, 1.0f - z*z));
float phi = 2.0f * M_PI * u2;
float x = r * cos(phi);
float y = r * sin(phi);
return make_float3(x, y, z);
}
static inline float lerp2(float t, float a, float b)
{
return (1.0f - t) * a + t * b;
}
static inline float3 UniformSampleCone(float u1, float u2, float costhetamax, float3 x, float3 y, float3 z)
{
float costheta = lerp2(u1, costhetamax, 1.0f);
float sintheta = sqrt(1.0f - costheta*costheta);
float phi = u2 * 2.0f * M_PI;
return cos(phi) * sintheta * x + sin(phi) * sintheta * y + costheta * z;
}
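// Analytic ray/sphere intersection assuming rayDir is normalized (so the
// quadratic coefficient a == 1). Returns the two parametric hit distances
// (res.x <= res.y), or (-1e28, -1e28) when the ray misses the sphere.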
static inline float2 RaySphereIntersect(float3 rayPos, float3 rayDir, float3 sphPos, float radius)
{
float3 k = rayPos - sphPos;
float b = dot(k, rayDir);
float c = dot(k, k) - radius*radius;
float d = b * b - c;
float2 res;
if (d >= 0.0f)
{
float sqrtd = sqrt(d);
float t1 = -b - sqrtd;
float t2 = -b + sqrtd;
res.x = fmin(t1, t2);
res.y = fmax(t1, t2);
}
else
{
res.x = -1e28f;
res.y = -1e28f;
}
return res;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
struct ObjectListTriangle
{
float4 v1;
float4 v2;
float4 v3;
};
struct ObjectListSphere
{
float3 pos;
float r;
};
struct ObjectList
{
#ifndef OCL_COMPILER
ObjectList() { m_triangleCount = m_offset = dummy1 = dummy2 = 0; }
inline ObjectListTriangle* GetTriangles() const { return (ObjectListTriangle*)(((char*)this) + sizeof(ObjectList)); }
inline const ObjectListSphere* GetSpheres() const { return (const ObjectListSphere*)((char*)this + sizeof(ObjectList) + m_triangleCount*sizeof(ObjectListTriangle)); }
#endif
int m_offset;
int m_triangleCount;
int dummy1;
int dummy2;
};
static inline int GetNumTriangles(struct ObjectList ol) { return ol.m_triangleCount; }
static inline int GetOffset(struct ObjectList ol) { return ol.m_offset; }
static inline int GetNumPrimitives(struct ObjectList ol) { return GetNumTriangles(ol); }
struct ALIGN_S(16) Lite_HitT
{
float t;
int primId;
int instId;
int geomId;
};
typedef struct Lite_HitT Lite_Hit;
static inline Lite_Hit Make_Lite_Hit(float t, int a_treeId)
{
int a_geomId = 0;
Lite_Hit hit;
hit.t = t;
hit.primId = -1;
hit.instId = -1;
hit.geomId = (a_geomId & 0x3FFFFFFF) | ((a_treeId << 30) & 0xC0000000);
return hit;
}
static inline bool HitNone(const Lite_Hit a_hit) { return (a_hit.primId == -1) || !isfinite(a_hit.t); }
static inline bool HitSome(const Lite_Hit a_hit) { return (a_hit.primId != -1) && isfinite(a_hit.t); }
static inline int IS_LEAF(int a_leftOffsetAndLeaf) { return a_leftOffsetAndLeaf & 0x80000000; }
static inline int PACK_LEAF_AND_OFFSET(int a_leftOffset, int leaf) { return (a_leftOffset & 0x7fffffff) | (leaf & 0x80000000); }
static inline int EXTRACT_OFFSET(int a_leftOffsetAndLeaf) { return a_leftOffsetAndLeaf & 0x7fffffff; }
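// Round-trip sketch (illustrative): the leaf flag lives in the top bit and the
// left-child offset in the low 31 bits, so for any offset < 2^31:
// IS_LEAF (PACK_LEAF_AND_OFFSET(off, 0x80000000)) != 0
// EXTRACT_OFFSET(PACK_LEAF_AND_OFFSET(off, 0x80000000)) == off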
// I know about bit fields, but in CUDA they didn't work
//
struct BVHNodeT
{
#ifndef OCL_COMPILER
BVHNodeT()
{
m_leftOffsetAndLeaf = 0xffffffff;
m_escapeIndex = 0xffffffff;
m_boxMin = float3(INFINITY, INFINITY, INFINITY);
m_boxMax = float3(-INFINITY, -INFINITY, -INFINITY);
}
inline unsigned int Leaf() const { return (m_leftOffsetAndLeaf & 0x80000000) >> 31; }
inline void SetLeaf(unsigned int a_Leaf) { m_leftOffsetAndLeaf = (m_leftOffsetAndLeaf & 0x7fffffff) | ((a_Leaf) << 31); }
inline void SetLeftOffset(unsigned int in_offset) { m_leftOffsetAndLeaf = (m_leftOffsetAndLeaf & 0x80000000) | (in_offset & 0x7fffffff); }
inline void SetObjectListOffset(unsigned int in_offset)
{
if (Leaf())
SetLeftOffset(in_offset);
}
inline unsigned int GetLeftOffset() const { return m_leftOffsetAndLeaf & 0x7fffffff; }
inline unsigned int GetRightOffset() const { return GetLeftOffset() + 1; }
inline unsigned int GetObjectListOffset() const { return GetLeftOffset(); }
inline void SetInstance(unsigned int a_Leaf) { m_escapeIndex = a_Leaf; }
inline unsigned int Instance() const { return (m_escapeIndex == 1); }
#endif
float3 m_boxMin;
unsigned int m_leftOffsetAndLeaf;
float3 m_boxMax;
unsigned int m_escapeIndex;
};
typedef struct BVHNodeT BVHNode;
static inline bool IsValidNode(const BVHNode a_node) { return !((a_node.m_leftOffsetAndLeaf == 0xffffffff) && (a_node.m_escapeIndex == 0xffffffff)); }
struct _PACKED RayFlagsT
{
unsigned char diffuseBounceNum;
unsigned char bounceNum;
unsigned short otherFlags;
};
typedef struct RayFlagsT RayFlags;
enum MATERIAL_EVENT {
RAY_EVENT_S = 1, ///< Indicates specular reflection or refraction (check for RAY_EVENT_T)
RAY_EVENT_D = 2, ///< Indicates diffuse reflection or translucency (check for RAY_EVENT_T)
RAY_EVENT_G = 4, ///< Indicates glossy reflection or refraction (check for RAY_EVENT_T)
RAY_EVENT_T = 8, ///< Indicates transparency or refraction.
RAY_EVENT_V = 16, ///< Indicates volume scattering; not used for a while
RAY_EVENT_TOUT = 32, ///< Indicates transparency outside of water, glass, etc. (old RAY_IS_INSIDE_TRANSPARENT_OBJECT = 128)
RAY_EVENT_TNINGLASS = 64,
};
static inline bool isPureSpecular(const MatSample a_sample) { return (a_sample.flags & RAY_EVENT_S) != 0 || (a_sample.flags & RAY_EVENT_T) != 0; }
static inline bool isDiffuse (const MatSample a_sample) { return (a_sample.flags & RAY_EVENT_D) != 0; }
static inline bool isGlossy (const MatSample a_sample) { return (a_sample.flags & RAY_EVENT_G) != 0; }
static inline bool isTransparent (const MatSample a_sample) { return (a_sample.flags & RAY_EVENT_T) != 0; }
enum {
RAY_GRAMMAR_DIRECT_LIGHT = 64,
RAY_GRAMMAR_OUT_OF_SCENE = 128,
RAY_DUMMY_FLAG_NOT_USED = 256,
RAY_HIT_SURFACE_FROM_OTHER_SIDE = 2048,
RAY_IS_DEAD = 4096, // set when the ray has accounted for the environment or died on the surface
RAY_SHADE_FROM_OTHER_SIDE = 8192,
RAY_SHADE_FROM_SKY_LIGHT = 16384,
RAY_WILL_DIE_NEXT_BOUNCE = 32768, // set when the ray will account only for light on the next bounce and then immediately die
};
static inline uint unpackRayFlags(uint a_flags) { return ((a_flags & 0xFFFF0000) >> 16); }
static inline uint packRayFlags(uint a_oldData, uint a_flags) { return (a_oldData & 0x0000FFFF) | (a_flags << 16); }
static inline uint unpackBounceNum(uint a_flags) { return ((a_flags & 0x0000FF00) >> 8); }
static inline uint unpackBounceNumDiff(uint a_flags) { return (a_flags & 0x000000FF); }
static inline uint packBounceNum (uint a_oldData, uint a_bounceNum) { return (a_oldData & 0xFFFF00FF) | (a_bounceNum << 8); }
static inline uint packBounceNumDiff(uint a_oldData, uint a_bounceNum) { return (a_oldData & 0xFFFFFF00) | (a_bounceNum); }
static inline bool rayIsActiveS(RayFlags a_flags) { return (a_flags.otherFlags & (RAY_GRAMMAR_OUT_OF_SCENE | RAY_IS_DEAD)) == 0; }
static inline bool rayIsActiveU(uint a_flags) { return ( ((a_flags & 0xFFFF0000) >> 16) & (RAY_GRAMMAR_OUT_OF_SCENE | RAY_IS_DEAD)) == 0; }
static inline bool isEyeRay(uint a_flags)
{
const uint otherFlags = unpackRayFlags(a_flags);
const bool haveSomeNonSpecularReflections = (otherFlags & RAY_EVENT_D) || (otherFlags & RAY_EVENT_G);
return (unpackBounceNum(a_flags) == 0) || !haveSomeNonSpecularReflections;
// return (unpackBounceNum(a_flags) == 0);
}
/**
\brief Transit structure used to pass MIS-weight data from the previous bounce to the current one (or from the current to the next).
*/
typedef struct MisDataT
{
float matSamplePdf; ///< previous angle pdf (pdfW) that was used for sampling the material.
float cosThetaPrev; ///< previous angle cosine; it allows computing the projected angle pdf (pdfWP = pdfW/cosThetaPrev).
int prevMaterialOffset; ///< offset in the material buffer to the material leaf (elemental BRDF) that was sampled on the previous bounce; needed to disable caustics.
int isSpecular; ///< indicates whether the bounce was pure specular.
} MisData;
static inline MisData makeInitialMisData()
{
MisData data;
data.matSamplePdf = 1.0f;
data.cosThetaPrev = 1.0f;
data.prevMaterialOffset = -1;
data.isSpecular = 1;
return data;
}
static inline unsigned int encodeNormal(float3 n)
{
const int x = (int)(n.x*32767.0f);
const int y = (int)(n.y*32767.0f);
const unsigned int sign = (n.z >= 0) ? 0 : 1;
const unsigned int sx = ((unsigned int)(x & 0xfffe) | sign);
const unsigned int sy = ((unsigned int)(y & 0xffff) << 16);
return (sx | sy);
}
static inline float3 decodeNormal(unsigned int a_data)
{
const unsigned int a_enc_x = (a_data & 0x0000FFFF);
const unsigned int a_enc_y = ((a_data & 0xFFFF0000) >> 16);
const float sign = (a_enc_x & 0x0001) ? -1.0f : 1.0f;
const float x = ((short)(a_enc_x & 0xfffe))*(1.0f / 32767.0f);
const float y = ((short)(a_enc_y & 0xffff))*(1.0f / 32767.0f);
const float z = sign*sqrt(fmax(1.0f - x*x - y*y, 0.0f));
return make_float3(x, y, z);
}
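// Round-trip sketch (illustrative): encodeNormal keeps ~15 bits of x and 16
// bits of y, stores the sign of z in the lowest bit of x, and decodeNormal
// rebuilds z from sqrt(1 - x*x - y*y); so for a unit-length n,
// decodeNormal(encodeNormal(n)) approximates n with error on the order of 1e-4.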
/*
struct ALIGN_S(16) HitPosNormT
{
float pos_x;
float pos_y;
float pos_z;
uint norm_xy;
#ifdef __CUDACC__
__device__ float3 GetNormal() const { return decodeNormal(norm_xy); }
__device__ void SetNormal(float3 a_norm) { norm_xy = encodeNormal(normalize(a_norm)); }
#endif
};
typedef struct HitPosNormT HitPosNorm;
static inline HitPosNorm make_HitPosNorm(float4 a_data)
{
HitPosNorm res;
res.pos_x = a_data.x;
res.pos_y = a_data.y;
res.pos_z = a_data.z;
res.norm_xy = (uint)(as_int(a_data.w));
return res;
}
static inline float3 GetPos(HitPosNorm a_data) { return make_float3(a_data.pos_x, a_data.pos_y, a_data.pos_z); }
static inline void SetPos(__private HitPosNorm* a_pData, float3 a_pos) { a_pData->pos_x = a_pos.x; a_pData->pos_y = a_pos.y; a_pData->pos_z = a_pos.z; }
*/
struct ALIGN_S(8) HitTexCoordT
{
float tex_u;
float tex_v;
};
typedef struct HitTexCoordT HitTexCoord;
struct ALIGN_S(8) HitMatRefT
{
int m_data;
float accumDist;
};
typedef struct HitMatRefT HitMatRef;
static inline int GetMaterialId(HitMatRef a_hitMat) { return a_hitMat.m_data; }
static inline void SetHitType(__private HitMatRef* a_pHitMat, int a_id)
{
int mask = a_id << 28;
int m_data2 = a_pHitMat->m_data & 0x0FFFFFFF;
a_pHitMat->m_data = m_data2 | mask;
}
static inline void SetMaterialId(__private HitMatRef* a_pHitMat, int a_mat_id)
{
int mask = a_mat_id & 0x0FFFFFFF;
int m_data2 = a_pHitMat->m_data & 0xF0000000;
a_pHitMat->m_data = m_data2 | mask;
}
struct ALIGN_S(8) Hit_Part4T
{
uint tangentCompressed;
uint bitangentCompressed;
};
typedef struct Hit_Part4T Hit_Part4;
static inline void CoordinateSystem(float3 v1, __private float3* v2, __private float3* v3)
{
float invLen = 1.0f;
if (fabs(v1.x) > fabs(v1.y))
{
invLen = 1.0f / sqrt(v1.x*v1.x + v1.z*v1.z);
(*v2) = make_float3((-1.0f) * v1.z * invLen, 0.0f, v1.x * invLen);
}
else
{
invLen = 1.0f / sqrt(v1.y * v1.y + v1.z * v1.z);
(*v2) = make_float3(0.0f, v1.z * invLen, (-1.0f) * v1.y * invLen);
}
(*v3) = cross(v1, (*v2));
}
static inline float3 MapSampleToCosineDistribution(float r1, float r2, float3 direction, float3 hit_norm, float power)
{
if(power >= 1e6f)
return direction;
float sin_phi = sin(2.0f*r1*3.141592654f);
float cos_phi = cos(2.0f*r1*3.141592654f);
//sincos(2.0f*r1*3.141592654f, &sin_phi, &cos_phi);
float cos_theta = pow(1.0f - r2, 1.0f / (power + 1.0f));
float sin_theta = sqrt(1.0f - cos_theta*cos_theta);
float3 deviation;
deviation.x = sin_theta*cos_phi;
deviation.y = sin_theta*sin_phi;
deviation.z = cos_theta;
float3 ny = direction, nx, nz;
CoordinateSystem(ny, &nx, &nz);
{
float3 temp = ny;
ny = nz;
nz = temp;
}
float3 res = nx*deviation.x + ny*deviation.y + nz*deviation.z;
float invSign = dot(direction, hit_norm) > 0.0f ? 1.0f : -1.0f;
if (invSign*dot(res, hit_norm) < 0.0f) // reflected ray is below surface #CHECK_THIS
{
res = (-1.0f)*nx*deviation.x + ny*deviation.y - nz*deviation.z;
//belowSurface = true;
}
return res;
}
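// Note added for clarity: cos_theta = (1 - r2)^(1/(power+1)) is the standard
// inversion for a Phong-style lobe, i.e. the sampled direction has pdf
// proportional to cos(theta)^power around 'direction' (pdfW = (power+1)/(2*pi)
// * cos(theta)^power); the below-surface flip at the end is a heuristic fix,
// and power >= 1e6 degenerates to a pure mirror direction.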
// Using the modified Phong reflectance model for physically based rendering
//
static inline float3 MapSampleToModifiedCosineDistribution(float r1, float r2, float3 direction, float3 hit_norm, float power,
bool* a_underSurface)
{
if (power >= 1e6f)
return direction;
// float sin_phi, cos_phi;
// sincosf(2 * r1*3.141592654f, &sin_phi, &cos_phi);
float sin_phi = sin(2.0f*r1*3.141592654f);
float cos_phi = cos(2.0f*r1*3.141592654f);
float sin_theta = sqrt(1.0f - pow(r2, 2.0f / (power + 1.0f)));
float3 deviation;
deviation.x = sin_theta*cos_phi;
deviation.y = sin_theta*sin_phi;
deviation.z = sqrt(1.0f - deviation.x*deviation.x - deviation.y*deviation.y); //pow(r2, 1.0f / (power + 1.0f));
float3 ny = direction, nx, nz;
CoordinateSystem(ny, &nx, &nz);
{
float3 temp = ny;
ny = nz;
nz = temp;
}
float3 res = nx*deviation.x + ny*deviation.y + nz*deviation.z;
(*a_underSurface) = false;
const float invSign = dot(direction, hit_norm) >= 0.0f ? 1.0f : -1.0f;
if (invSign*dot(res, hit_norm) < 0.0f) // reflected ray is below surface
{
res = (-1.0f)*nx*deviation.x - ny*deviation.y + nz*deviation.z;
(*a_underSurface) = true;
}
return res;
}
/**
\brief transform float2 sample in rect [-1,1]x[-1,1] to disc centered at (0,0) with radius == 1.
\param xy - input sample in rect [-1,1]x[-1,1]
\return position in disc
*/
static inline float2 MapSamplesToDisc(float2 xy)
{
float x = xy.x;
float y = xy.y;
float r = 0;
float phi = 0;
float2 res = xy;
if (x > y && x > -y)
{
r = x;
phi = 0.25f*3.141592654f*(y / x);
}
if (x < y && x > -y)
{
r = y;
phi = 0.25f*3.141592654f*(2.0f - x / y);
}
if (x < y && x < -y)
{
r = -x;
phi = 0.25f*3.141592654f*(4.0f + y / x);
}
if (x > y && x < -y)
{
r = -y;
phi = 0.25f*3.141592654f*(6.0f - x / y);
}
//float sin_phi, cos_phi;
//sincosf(phi, &sin_phi, &cos_phi);
float sin_phi = sin(phi);
float cos_phi = cos(phi);
res.x = r*sin_phi;
res.y = r*cos_phi;
return res;
}
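// Note added for clarity: this is a concentric square-to-disc mapping in the
// spirit of Shirley & Chiu: each of the four wedges of the square maps to a
// quarter of the disc, which keeps stratified samples well distributed,
// unlike the naive (r = sqrt(u), phi = 2*pi*v) mapping that distorts strata.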
static inline float3 MapSamplesToCone(float cosCutoff, float2 sample, float3 direction)
{
float cosTheta = (1.0f - sample.x) + sample.x * cosCutoff;
float sinTheta = sqrt(1.0f - cosTheta * cosTheta);
//float sinPhi, cosPhi;
//sincosf(2.0f * M_PI * sample.y, &sinPhi, &cosPhi);
float sinPhi = sin(2.0f * M_PI * sample.y);
float cosPhi = cos(2.0f * M_PI * sample.y);
float3 deviation = make_float3(cosPhi * sinTheta, sinPhi * sinTheta, cosTheta);
// transform to different basis
//
float3 ny = direction;
float3 nx, nz;
CoordinateSystem(ny, &nx, &nz);
//swap(ny, nz);
{
float3 temp = ny;
ny = nz;
nz = temp;
}
return nx*deviation.x + ny*deviation.y + nz*deviation.z;
}
static inline float3 MapSamplesToSphere(float r1, float r2) // [-1..1]
{
float phi = r1*3.141592654f * 2.0f; // [0 .. 2PI]
float h = r2*2.0f - 1.0f; // [-1 .. 1]
float sin_phi = sin(phi);
float cos_phi = cos(phi);
float x = sin_phi*sqrt(1 - h*h);
float y = cos_phi*sqrt(1 - h*h);
float z = h;
return make_float3(x, y, z);
}
struct ALIGN_S(16) ZBlockT
{
#ifndef OCL_COMPILER
#ifndef __CUDACC__
ZBlockT() { index = 0; diff = 100; counter = 0; index2 = 0; }
ZBlockT(int a_index, float a_diff)
{
index = a_index;
index2 = 0;
diff = a_diff;
counter = 0;
}
#endif
inline static int GetSize() { return Z_ORDER_BLOCK_SIZE*Z_ORDER_BLOCK_SIZE; }
inline int GetOffset() const { return index*GetSize(); }
inline bool operator<(const ZBlockT& rhs) const { return (diff < rhs.diff); }
#endif
int index; // just block offset in global screen buffer
int index2; // index in other buffer + avg trace depth
int counter; // how many times this block was traced
float diff; // error in some units; the stop criterion, in fact
};
typedef struct ZBlockT ZBlock;
static bool BlockFinished(ZBlock block, int a_minRaysPerPixel, int a_maxRaysPerPixel, float* a_outDiff) // for use on the CPU side
{
int samplesPerPixel = block.counter; // was *2 due to odd and even stuff
if(a_outDiff!=NULL)
*a_outDiff = block.diff;
float acceptedBadPixels = 8.0f; // sqrt((float)(CMP_RESULTS_BLOCK_SIZE));
int minRaysPerPixel = a_minRaysPerPixel;
bool summErrorOk = (block.diff <= acceptedBadPixels);
bool maxErrorOk = false;
return ((summErrorOk || maxErrorOk) && samplesPerPixel >= minRaysPerPixel) || (samplesPerPixel >= a_maxRaysPerPixel);
}
static inline uint ThreadSwizzle1D(uint pixelId, uint zBlockIndex)
{
uint indexInsideZBlock = pixelId%CMP_RESULTS_BLOCK_SIZE;
return zBlockIndex*CMP_RESULTS_BLOCK_SIZE + indexInsideZBlock;
}
static inline float PdfAtoW(const float aPdfA, const float aDist, const float aCosThere)
{
return (aPdfA*aDist*aDist) / fmax(aCosThere, DEPSILON2);
}
static inline float PdfWtoA(const float aPdfW, const float aDist, const float aCosThere)
{
return aPdfW * fabs(aCosThere) / fmax(aDist*aDist, DEPSILON2);
}
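// Worked relation (added for clarity): these two implement the usual change of
// measure between area and solid-angle densities,
// pdfW = pdfA * dist^2 / cos(theta), pdfA = pdfW * cos(theta) / dist^2,
// so, up to the DEPSILON2 clamps and the fabs, PdfWtoA(PdfAtoW(p,d,c), d, c) == p.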
struct MRaysStat
{
#ifndef OCL_COMPILER
MRaysStat() { memset(this, 0, sizeof(MRaysStat)); }
#endif
int traceTimePerCent;
float raysPerSec;
float samplesPerSec;
float reorderTimeMs;
float traversalTimeMs;
float samLightTimeMs;
float shadowTimeMs;
float shadeTimeMs;
float bounceTimeMS;
float evalHitMs;
float nextBounceMs;
float sampleTimeMS;
};
static inline float probabilityAbsorbRR(uint a_flags, uint a_globalFlags)
{
if (a_globalFlags & HRT_ENABLE_MMLT) // Metropolis doesn't use roulette
return 0.0f;
const uint diffBounceNum = unpackBounceNumDiff(a_flags);
const uint otherFlags = unpackRayFlags(a_flags);
float pabsorb = 0.0f;
if (diffBounceNum >= 4)
pabsorb = 0.50f;
else if (diffBounceNum >= 3)
pabsorb = 0.25f;
else
pabsorb = 0.0f;
return pabsorb;
}
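// Usage sketch (an assumption about the surrounding path tracer; names are
// illustrative): standard Russian roulette stays unbiased by dividing the
// surviving throughput by the survival probability:
// const float p = probabilityAbsorbRR(flags, a_globalFlags);
// if (rndFloat(&gen) < p) { /* kill the path */ }
// else pathThroughput *= 1.0f / (1.0f - p);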
static inline float MonteCarloVariance(float3 avgColor, float sqrColor, int nSamples)
{
const float maxColor = fmax(avgColor.x, fmax(avgColor.y, avgColor.z));
const float fnSamples = ((float)(nSamples));
const float nSampleInv = 1.0f / fnSamples;
return fabs(sqrColor*nSampleInv - (maxColor*maxColor*nSampleInv*nSampleInv));
}
static inline float MonteCarloRelErr(float maxColor, float sqrColor, int nSamples)
{
const float fnSamples = ((float)(nSamples));
const float nSampleInv = 1.0f / fnSamples;
const float variance = fabs(sqrColor*nSampleInv - (maxColor*maxColor*nSampleInv*nSampleInv));
const float stdError = sqrt(variance);
return stdError / (fmax(maxColor, 0.00001f));
}
static inline float MonteCarloRelErr2(float3 avgColor, float sqrColor, int nSamples)
{
const float maxColor = fmax(avgColor.x, fmax(avgColor.y, avgColor.z));
return MonteCarloRelErr(maxColor, sqrColor, nSamples);
}
static inline float colorSquareMax3(float3 calcColor)
{
float3 calcColorSqr;
calcColorSqr.x = calcColor.x*calcColor.x;
calcColorSqr.y = calcColor.y*calcColor.y;
calcColorSqr.z = calcColor.z*calcColor.z;
return fmax(calcColorSqr.x, fmax(calcColorSqr.y, calcColorSqr.z));
}
static inline float colorSquareMax4(float4 calcColor)
{
float4 calcColorSqr;
calcColorSqr.x = calcColor.x*calcColor.x;
calcColorSqr.y = calcColor.y*calcColor.y;
calcColorSqr.z = calcColor.z*calcColor.z;
calcColorSqr.w = calcColor.w*calcColor.w;
return fmax(calcColorSqr.x, fmax(calcColorSqr.y, calcColorSqr.z));
}
// Unpolarized fresnel reflection term for dielectric materials
// this formula is simplified and should be checked
//
static inline float fresnelCoeffSimple(float cosThetaI, float a_eta)
{
float g = sqrt(a_eta*a_eta - 1.0f + cosThetaI * cosThetaI);
float t1 = (g - cosThetaI) / (g + cosThetaI);
float t2 = (cosThetaI * (g + cosThetaI) - 1) / (cosThetaI * (g - cosThetaI) + 1.0f);
return 0.5f * t1 * t1 * (1.0f + t2 * t2);
}
// The following functions calculate the reflected and refracted
// directions in addition to the fresnel coefficients. Based on PBRT
// and the paper "Derivation of Refraction Formulas" by Paul S. Heckbert.
//
static inline float fresnelDielectric(float cosTheta1, float cosTheta2, float etaExt, float etaInt)
{
float Rs = (etaExt * cosTheta1 - etaInt * cosTheta2) / (etaExt * cosTheta1 + etaInt * cosTheta2);
float Rp = (etaInt * cosTheta1 - etaExt * cosTheta2) / (etaInt * cosTheta1 + etaExt * cosTheta2);
return (Rs * Rs + Rp * Rp) / 2.0f;
}
static inline float fresnelConductor(float cosTheta, float eta, float roughness)
{
float tmp = (eta*eta + roughness*roughness) * (cosTheta * cosTheta);
float rParl2 = (tmp - (eta * (2.0f * cosTheta)) + 1.0f) / (tmp + (eta * (2.0f * cosTheta)) + 1.0f);
float tmpF = eta*eta + roughness*roughness;
float rPerp2 = (tmpF - (eta * (2.0f * cosTheta)) + (cosTheta*cosTheta)) / (tmpF + (eta * (2.0f * cosTheta)) + (cosTheta*cosTheta));
return (rParl2 + rPerp2) / 2.0f;
}
static inline float fresnelReflectionCoeff(float cosTheta1, float etaExt, float etaInt)
{
// Swap the indices of refraction if the interaction starts
// at the inside of the object
//
if (cosTheta1 < 0.0f)
{
float temp = etaInt;
etaInt = etaExt;
etaExt = temp;
}
// Using Snell's law, calculate the sine of the angle
// between the transmitted ray and the surface normal
//
float sinTheta2 = etaExt / etaInt * sqrt(fmax(0.0f, 1.0f - cosTheta1*cosTheta1));
if (sinTheta2 > 1.0f)
return 1.0f; // Total internal reflection!
// Use the sin^2+cos^2=1 identity - max() guards against
// numerical imprecision
//
float cosTheta2 = sqrt(fmax(0.0f, 1.0f - sinTheta2*sinTheta2));
// Finally compute the reflection coefficient
//
return fresnelDielectric(fabs(cosTheta1), cosTheta2, etaInt, etaExt);
}
static inline float fresnelReflectionCoeffMentalLike(float cosTheta, float refractIOR)
{
return fresnelReflectionCoeff(fabs(cosTheta), 1.0f, refractIOR);
}
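// Usage sketch (an assumption, illustrative names): for glass-like materials
// the returned coefficient R is typically used as the probability of sampling
// reflection rather than refraction:
// const float R = fresnelReflectionCoeffMentalLike(dot(rayDir, n), ior);
// if (rnd < R) { /* sample reflected dir */ } else { /* sample refracted dir */ }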
static inline float contribFunc(float3 color)
{
return fmax(0.33334f*(color.x + color.y + color.z), 0.0f);
}
static inline int packXY1616(int x, int y) { return (y << 16) | (x & 0x0000FFFF); }
// CPU and CUDA only code
//
#ifndef OCL_COMPILER
static inline float3 clamp3(float3 x, float a, float b) { return make_float3(fmin(fmax(x.x, a), b), fmin(fmax(x.y, a), b), fmin(fmax(x.z, a), b)); }
static unsigned short MortonTable256Host[] =
{
0x0000, 0x0001, 0x0004, 0x0005, 0x0010, 0x0011, 0x0014, 0x0015,
0x0040, 0x0041, 0x0044, 0x0045, 0x0050, 0x0051, 0x0054, 0x0055,
0x0100, 0x0101, 0x0104, 0x0105, 0x0110, 0x0111, 0x0114, 0x0115,
0x0140, 0x0141, 0x0144, 0x0145, 0x0150, 0x0151, 0x0154, 0x0155,
0x0400, 0x0401, 0x0404, 0x0405, 0x0410, 0x0411, 0x0414, 0x0415,
0x0440, 0x0441, 0x0444, 0x0445, 0x0450, 0x0451, 0x0454, 0x0455,
0x0500, 0x0501, 0x0504, 0x0505, 0x0510, 0x0511, 0x0514, 0x0515,
0x0540, 0x0541, 0x0544, 0x0545, 0x0550, 0x0551, 0x0554, 0x0555,
0x1000, 0x1001, 0x1004, 0x1005, 0x1010, 0x1011, 0x1014, 0x1015,
0x1040, 0x1041, 0x1044, 0x1045, 0x1050, 0x1051, 0x1054, 0x1055,
0x1100, 0x1101, 0x1104, 0x1105, 0x1110, 0x1111, 0x1114, 0x1115,
0x1140, 0x1141, 0x1144, 0x1145, 0x1150, 0x1151, 0x1154, 0x1155,
0x1400, 0x1401, 0x1404, 0x1405, 0x1410, 0x1411, 0x1414, 0x1415,
0x1440, 0x1441, 0x1444, 0x1445, 0x1450, 0x1451, 0x1454, 0x1455,
0x1500, 0x1501, 0x1504, 0x1505, 0x1510, 0x1511, 0x1514, 0x1515,
0x1540, 0x1541, 0x1544, 0x1545, 0x1550, 0x1551, 0x1554, 0x1555,
0x4000, 0x4001, 0x4004, 0x4005, 0x4010, 0x4011, 0x4014, 0x4015,
0x4040, 0x4041, 0x4044, 0x4045, 0x4050, 0x4051, 0x4054, 0x4055,
0x4100, 0x4101, 0x4104, 0x4105, 0x4110, 0x4111, 0x4114, 0x4115,
0x4140, 0x4141, 0x4144, 0x4145, 0x4150, 0x4151, 0x4154, 0x4155,
0x4400, 0x4401, 0x4404, 0x4405, 0x4410, 0x4411, 0x4414, 0x4415,
0x4440, 0x4441, 0x4444, 0x4445, 0x4450, 0x4451, 0x4454, 0x4455,
0x4500, 0x4501, 0x4504, 0x4505, 0x4510, 0x4511, 0x4514, 0x4515,
0x4540, 0x4541, 0x4544, 0x4545, 0x4550, 0x4551, 0x4554, 0x4555,
0x5000, 0x5001, 0x5004, 0x5005, 0x5010, 0x5011, 0x5014, 0x5015,
0x5040, 0x5041, 0x5044, 0x5045, 0x5050, 0x5051, 0x5054, 0x5055,
0x5100, 0x5101, 0x5104, 0x5105, 0x5110, 0x5111, 0x5114, 0x5115,
0x5140, 0x5141, 0x5144, 0x5145, 0x5150, 0x5151, 0x5154, 0x5155,
0x5400, 0x5401, 0x5404, 0x5405, 0x5410, 0x5411, 0x5414, 0x5415,
0x5440, 0x5441, 0x5444, 0x5445, 0x5450, 0x5451, 0x5454, 0x5455,
0x5500, 0x5501, 0x5504, 0x5505, 0x5510, 0x5511, 0x5514, 0x5515,
0x5540, 0x5541, 0x5544, 0x5545, 0x5550, 0x5551, 0x5554, 0x5555
};
static inline uint ZIndexHost(ushort x, ushort y)
{
return MortonTable256Host[y >> 8] << 17 |
MortonTable256Host[x >> 8] << 16 |
MortonTable256Host[y & 0xFF] << 1 |
MortonTable256Host[x & 0xFF];
}
static inline uint HostIndexZBlock2D(int x, int y, int pitch)
{
uint zOrderX = x % Z_ORDER_BLOCK_SIZE;
uint zOrderY = y % Z_ORDER_BLOCK_SIZE;
uint zIndex = ZIndexHost(zOrderX, zOrderY);
uint wBlocks = pitch / Z_ORDER_BLOCK_SIZE;
uint blockX = x / Z_ORDER_BLOCK_SIZE;
uint blockY = y / Z_ORDER_BLOCK_SIZE;
return (blockX + (blockY)*(wBlocks))*Z_ORDER_BLOCK_SIZE*Z_ORDER_BLOCK_SIZE + zIndex;
}
static void ImageZBlockMemToRowPitch(const float4* inData, float4* outData, int w, int h)
{
#pragma omp parallel for
for (int y = 0; y<h; y++)
{
for (int x = 0; x<w; x++)
{
int indexSrc = HostIndexZBlock2D(x, y, w);
int indexDst = Index2D(x, y, w);
outData[indexDst] = inData[indexSrc];
}
}
}
#endif
#ifdef __CUDACC__
#undef ushort
#undef uint
#endif
#define PREFIX_SUMM_MACRO(idata,odata,l_Data,_bsize) \
{ \
uint pos = 2 * LOCAL_ID_X - (LOCAL_ID_X & (_bsize - 1)); \
l_Data[pos] = 0; \
pos += _bsize; \
l_Data[pos] = idata; \
\
for (uint offset = 1; offset < _bsize; offset <<= 1) \
{ \
SYNCTHREADS_LOCAL; \
uint t = l_Data[pos] + l_Data[pos - offset]; \
SYNCTHREADS_LOCAL; \
l_Data[pos] = t; \
} \
\
odata = l_Data[pos]; \
}
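// Usage sketch (an assumption; _bsize must be a power of two): the macro runs
// a Hillis-Steele style inclusive scan in local memory across one work-group:
// __local uint l_Data[2 * BLOCK_SIZE];
// uint scanned;
// PREFIX_SUMM_MACRO(myValue, scanned, l_Data, BLOCK_SIZE);
// afterwards 'scanned' holds the inclusive prefix sum of 'myValue' over the group.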
enum CLEAR_FLAGS{ CLEAR_MATERIALS = 1,
CLEAR_GEOMETRY = 2,
CLEAR_LIGHTS = 4,
CLEAR_TEXTURES = 8,
CLEAR_CUSTOM_DATA = 16,
CLEAR_ALL = CLEAR_MATERIALS | CLEAR_GEOMETRY | CLEAR_LIGHTS | CLEAR_TEXTURES | CLEAR_CUSTOM_DATA };
enum BVH_FLAGS { BVH_ENABLE_SMOOTH_OPACITY = 1};
typedef struct GBuffer1T
{
float depth;
float3 norm;
float4 rgba;
int matId;
float coverage;
} GBuffer1;
typedef struct GBuffer2T
{
float2 texCoord;
int objId;
int instId;
} GBuffer2;
typedef struct GBufferAll
{
GBuffer1 data1;
GBuffer2 data2;
} GBufferAll;
static inline void initGBufferAll(__private GBufferAll* a_pElem)
{
a_pElem->data1.depth = 1e+6f;
a_pElem->data1.norm = make_float3(0, 0, 0);
a_pElem->data1.rgba = make_float4(0, 0, 0, 1);
a_pElem->data1.matId = -1;
a_pElem->data1.coverage = 0.0f;
a_pElem->data2.texCoord = make_float2(0, 0);
a_pElem->data2.objId = -1;
a_pElem->data2.instId = -1;
}
#define GBUFFER_SAMPLES 16
#define PMPIX_SAMPLES 256 // Production Mode Pixel Samples
static inline float4 packGBuffer1(GBuffer1 a_input)
{
float4 resColor;
unsigned int packedRGBX = RealColorToUint32(a_input.rgba);
const float clampedCoverage = fmin(fmax(a_input.coverage*255.0f, 0.0f), 255.0f);
const int compressedCoverage = ((int)(clampedCoverage)) << 24;
const int packedMIdAncCov = (a_input.matId & 0x00FFFFFF) | (compressedCoverage & 0xFF000000);
resColor.x = a_input.depth;
resColor.y = as_float(encodeNormal(a_input.norm));
resColor.z = as_float(packedMIdAncCov);
resColor.w = as_float(packedRGBX);
return resColor;
}
static inline GBuffer1 unpackGBuffer1(float4 a_input)
{
GBuffer1 res;
res.depth = a_input.x;
res.norm = decodeNormal(as_int(a_input.y));
res.matId = as_int(a_input.z) & 0x00FFFFFF;
const int compressedCoverage = (as_int(a_input.z) & 0xFF000000) >> 24;
res.coverage = ((float)compressedCoverage)*(1.0f / 255.0f);
unsigned int rgba = as_int(a_input.w);
res.rgba.x = (rgba & 0x000000FF)*(1.0f / 255.0f);
res.rgba.y = ((rgba & 0x0000FF00) >> 8)*(1.0f / 255.0f);
res.rgba.z = ((rgba & 0x00FF0000) >> 16)*(1.0f / 255.0f);
res.rgba.w = ((rgba & 0xFF000000) >> 24)*(1.0f / 255.0f);
return res;
}
static inline float4 packGBuffer2(GBuffer2 a_input)
{
float4 res;
res.x = a_input.texCoord.x;
res.y = a_input.texCoord.y;
res.z = as_float(a_input.objId);
res.w = as_float(a_input.instId);
return res;
}
static inline GBuffer2 unpackGBuffer2(float4 a_input)
{
GBuffer2 res;
res.texCoord.x = a_input.x;
res.texCoord.y = a_input.y;
res.objId = as_int(a_input.z);
res.instId = as_int(a_input.w);
return res;
}
static inline float projectedPixelSize(float dist, float FOV, float w, float h)
{
float ppx = (FOV / w)*dist;
float ppy = (FOV / h)*dist;
if (dist > 0.0f)
return 2.0f*fmax(ppx, ppy);
else
return 1000.0f;
}
static inline float surfaceSimilarity(float4 data1, float4 data2, const float MADXDIFF)
{
const float MANXDIFF = 0.15f;
float3 n1 = to_float3(data1);
float3 n2 = to_float3(data2);
float dist = length(n1 - n2);
if (dist >= MANXDIFF)
return 0.0f;
float d1 = data1.w;
float d2 = data2.w;
if (fabs(d1 - d2) >= MADXDIFF)
return 0.0f;
float normalSimilar = sqrt(1.0f - (dist / MANXDIFF));
float depthSimilar = sqrt(1.0f - fabs(d1 - d2) / MADXDIFF);
return normalSimilar * depthSimilar;
}
static inline float gbuffDiff(GBufferAll s1, GBufferAll s2, const float a_fov, float w, float h)
{
const float ppSize = projectedPixelSize(s1.data1.depth, a_fov, w, h);
const float surfaceSimilar = surfaceSimilarity(to_float4(s1.data1.norm, s1.data1.depth),
to_float4(s2.data1.norm, s2.data1.depth), ppSize*2.0f);
const float surfaceDiff = 1.0f - surfaceSimilar;
const float objDiff = (s1.data2.instId == s2.data2.instId && s1.data2.objId == s2.data2.objId) ? 0.0f : 1.0f;
const float matDiff = (s1.data1.matId == s2.data1.matId) ? 0.0f : 1.0f;
const float alphaDiff = fabs(s1.data1.rgba.w - s2.data1.rgba.w);
return surfaceDiff + objDiff + matDiff + alphaDiff;
}
static inline float gbuffDiffObj(GBufferAll s1, GBufferAll s2, const float a_fov, int w, int h)
{
const float objDiff = (s1.data2.instId == s2.data2.instId && s1.data2.objId == s2.data2.objId) ? 0.0f : 1.0f;
const float matDiff = (s1.data1.matId == s2.data1.matId) ? 0.0f : 1.0f;
return objDiff + matDiff;
}
// static inline int reverseBits(int a_input, int a_maxSize)
// {
// int maxBit = 0;
// while (a_maxSize >>= 1)
// ++maxBit;
//
// int result = 0;
//
// for (int i = 0; i < maxBit; i++)
// {
// const int j = maxBit - i - 1;
// const int inputMask = (0x00000001 << j);
// result |= ((a_input & inputMask) >> j) << i;
// }
//
// return result;
// }
enum PLAIN_LIGHT_TYPES {
PLAIN_LIGHT_TYPE_POINT_OMNI = 0,
PLAIN_LIGHT_TYPE_POINT_SPOT = 1,
PLAIN_LIGHT_TYPE_DIRECT = 2,
PLAIN_LIGHT_TYPE_SKY_DOME = 3,
PLAIN_LIGHT_TYPE_AREA = 4,
PLAIN_LIGHT_TYPE_SPHERE = 5,
PLAIN_LIGHT_TYPE_CYLINDER = 6,
PLAIN_LIGHT_TYPE_MESH = 7,
};
enum PLAIN_LIGHT_FLAGS{
DISABLE_SAMPLING = 1,
SEPARATE_SKY_LIGHT_ENVIRONMENT = 2,
SKY_LIGHT_USE_PEREZ_ENVIRONMENT = 4,
AREA_LIGHT_SKY_PORTAL = 8,
LIGHT_HAS_IES = 16, ///< has a spherical distribution mask around the light
LIGHT_IES_POINT_AREA = 32, ///< apply the IES goniometric profile from the center of the light always.
LIGHT_DO_NOT_SAMPLE_ME = 64, ///< zero selection probability; never sample it.
};
enum SKY_PORTAL_COLOR_SOURCE { SKY_PORTAL_SOURCE_ENVIRONMENT = 1,
SKY_PORTAL_SOURCE_SKYLIGHT = 2,
SKY_PORTAL_SOURCE_CUSTOM = 3
};
static inline float3 triBaricentrics3(float3 ray_pos, float3 ray_dir, float3 A_pos, float3 B_pos, float3 C_pos)
{
const float3 edge1 = B_pos - A_pos;
const float3 edge2 = C_pos - A_pos;
const float3 pvec = cross(ray_dir, edge2);
const float det = dot(edge1, pvec);
const float inv_det = 1.0f / det;
const float3 tvec = ray_pos - A_pos;
const float v = dot(tvec, pvec)*inv_det;
const float3 qvec = cross(tvec, edge1);
const float u = dot(ray_dir, qvec)*inv_det;
const float t = dot(edge2, qvec)*inv_det;
return make_float3(u, v, t);
}
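// Note added for clarity: this is the Moller-Trumbore setup without the usual
// early determinant/range tests; the hit point is ray_pos + t*ray_dir, and a
// hit inside the triangle additionally requires u >= 0, v >= 0, u + v <= 1
// (the caller must also guard against a near-zero determinant).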
typedef struct ShadeContextT
{
float3 wp; ///< world pos
//float3 lp; ///< local pos
float3 l; ///< direction to light
float3 v; ///< view vector
float3 n; ///< smooth normal (for shading and new rays offsets)
float3 fn; ///< flat normal (for bump mapping and tangent space transform)
float3 tg; ///< tangent (for bump mapping and tangent space transform)
float3 bn; ///< binormal (for bump mapping and tangent space transform)
float2 tc; ///< tex coord (0);
//float2 tc1; ///< tex coord (1);
float2 tccp; ///< tex coord camera projected
bool hfi; ///< Hit From Inside: true if the surface was hit from the inside of an object that has a glass or SSS material
} ShadeContext;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#define MAXPROCTEX 16
#define F4_PROCTEX_SIZE 12
/**
\brief This structure stores the results of procedural texture kernel execution.
*/
typedef struct ProcTextureListT
{
int currMaxProcTex;
int id_f4 [MAXPROCTEX];
float3 fdata4[MAXPROCTEX];
} ProcTextureList;
static inline void InitProcTextureList(__private ProcTextureList* a_pList)
{
a_pList->currMaxProcTex = 0;
a_pList->id_f4[0] = INVALID_TEXTURE;
}
static inline void WriteProcTextureList(__global float4* fdata, int tid, int size, __private const ProcTextureList* a_pList)
{
__global int* idata = (__global int*)fdata;
if(a_pList->currMaxProcTex == 0)
{
idata[tid] = INVALID_TEXTURE;
return;
}
const int finalProcTex = (a_pList->currMaxProcTex > MAXPROCTEX) ? MAXPROCTEX : a_pList->currMaxProcTex;
for(int i=0;i<finalProcTex;i++)
idata[tid + size * i] = a_pList->id_f4[i];
if(finalProcTex < MAXPROCTEX)
idata[tid + size * finalProcTex] = INVALID_TEXTURE; // list end
#ifdef OCL_COMPILER
for(int i=0;i<finalProcTex;i+=2)
{
const float4 h1 = to_float4(a_pList->fdata4[i+0], 0.0f);
const float4 h2 = to_float4(a_pList->fdata4[i+1], 0.0f);
float8 data = {h1.x, h1.y, h1.z, h1.w,
h2.x, h2.y, h2.z, h2.w,};
const int offset = (tid + size * (i/2 + MAXPROCTEX/4));
vstore_half8(data, 0, (__global half*)(fdata + offset) );
}
#endif
}
static inline void ReadProcTextureList(__global float4* fdata, int tid, int size,
__private ProcTextureList* a_pList)
{
if (fdata == 0)
return;
__global int* idata = (__global int*)fdata;
int currMaxProcTex;
for(currMaxProcTex = 0; currMaxProcTex < MAXPROCTEX; currMaxProcTex++)
{
const int texId = idata[tid + size * currMaxProcTex];
a_pList->id_f4[currMaxProcTex] = texId;
if(texId == INVALID_TEXTURE)
break;
}
#ifdef OCL_COMPILER
for(int i=0;i<currMaxProcTex;i+=2)
{
const int offset = (tid + size * (i/2 + MAXPROCTEX/4));
const float8 data = vload_half8(0, (__global half*)(fdata + offset));
a_pList->fdata4[i+0] = to_float3(data.s0123);
a_pList->fdata4[i+1] = to_float3(data.s4567);
}
#endif
a_pList->currMaxProcTex = currMaxProcTex;
}
/**
\brief get color for precomputed procedural texture
\param a_texId - input tex id
\param a_pList - input ptl
\return texture color;
*/
static inline float4 readProcTex(int a_texId, const __private ProcTextureList* a_pList)
{
//for(int i=0; i<maxIter; i++)
//{
// if(a_texId == a_pList->id_f4[i])
// return to_float4(a_pList->fdata4[i], 0.0f);
//}
//
//return make_float4(1, 1, 1, -1.0f);
const int maxIter = (a_pList->currMaxProcTex < MAXPROCTEX) ? a_pList->currMaxProcTex : MAXPROCTEX; // min
float4 quad1 = make_float4(1, 1, 1, -1.0f);
quad1 = (0 < maxIter && a_texId == a_pList->id_f4[0]) ? to_float4(a_pList->fdata4[0], 0.0f) : quad1;
quad1 = (1 < maxIter && a_texId == a_pList->id_f4[1]) ? to_float4(a_pList->fdata4[1], 0.0f) : quad1;
quad1 = (2 < maxIter && a_texId == a_pList->id_f4[2]) ? to_float4(a_pList->fdata4[2], 0.0f) : quad1;
quad1 = (3 < maxIter && a_texId == a_pList->id_f4[3]) ? to_float4(a_pList->fdata4[3], 0.0f) : quad1;
float4 quad2 = make_float4(1, 1, 1, -1.0f);
quad2 = (4 < maxIter && a_texId == a_pList->id_f4[4]) ? to_float4(a_pList->fdata4[4], 0.0f) : quad2;
quad2 = (5 < maxIter && a_texId == a_pList->id_f4[5]) ? to_float4(a_pList->fdata4[5], 0.0f) : quad2;
quad2 = (6 < maxIter && a_texId == a_pList->id_f4[6]) ? to_float4(a_pList->fdata4[6], 0.0f) : quad2;
quad2 = (7 < maxIter && a_texId == a_pList->id_f4[7]) ? to_float4(a_pList->fdata4[7], 0.0f) : quad2;
const float4 quad12 = (quad1.w != -1.0f) ? quad1 : quad2;
float4 quad3 = make_float4(1, 1, 1, -1.0f);
quad3 = (8 < maxIter && a_texId == a_pList->id_f4[8]) ? to_float4(a_pList->fdata4[8], 0.0f) : quad3;
quad3 = (9 < maxIter && a_texId == a_pList->id_f4[9]) ? to_float4(a_pList->fdata4[9], 0.0f) : quad3;
quad3 = (10 < maxIter && a_texId == a_pList->id_f4[10]) ? to_float4(a_pList->fdata4[10], 0.0f) : quad3;
quad3 = (11 < maxIter && a_texId == a_pList->id_f4[11]) ? to_float4(a_pList->fdata4[11], 0.0f) : quad3;
float4 quad4 = make_float4(1, 1, 1, -1.0f);
quad4 = (12 < maxIter && a_texId == a_pList->id_f4[12]) ? to_float4(a_pList->fdata4[12], 0.0f) : quad4;
quad4 = (13 < maxIter && a_texId == a_pList->id_f4[13]) ? to_float4(a_pList->fdata4[13], 0.0f) : quad4;
quad4 = (14 < maxIter && a_texId == a_pList->id_f4[14]) ? to_float4(a_pList->fdata4[14], 0.0f) : quad4;
quad4 = (15 < maxIter && a_texId == a_pList->id_f4[15]) ? to_float4(a_pList->fdata4[15], 0.0f) : quad4;
const float4 quad34 = (quad3.w != -1.0f) ? quad3 : quad4;
return (quad12.w != -1.0f) ? quad12 : quad34;
}
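// Note added for clarity: the chain of selects above is a branch-free linear
// search over at most MAXPROCTEX ids, with w == -1.0f marking "not found";
// the branchless form keeps all GPU lanes on the same execution path.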
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
typedef struct ShadowSampleT
{
float3 pos;
float3 color;
float pdf;
float maxDist;
float cosAtLight;
bool isPoint;
} ShadowSample;
static inline void WriteShadowSample(const __private ShadowSample* a_pSam, float a_lightProbSel, int a_lightOffset, int a_tid, int a_threadNum,
__global float4* a_out)
{
const float pdfAndIsPoint = a_pSam->isPoint ? (-1.0f)*a_pSam->pdf : a_pSam->pdf;
a_out[a_tid + a_threadNum*0] = make_float4(a_pSam->pos.x, a_pSam->pos.y, a_pSam->pos.z, pdfAndIsPoint);
a_out[a_tid + a_threadNum*1] = make_float4(a_pSam->color.x, a_pSam->color.y, a_pSam->color.z, a_pSam->maxDist);
a_out[a_tid + a_threadNum*2] = make_float4(a_pSam->cosAtLight, a_lightProbSel, as_float(a_lightOffset), 0);
}
static inline void ReadShadowSample(const __global float4* a_in, int a_tid, int a_threadNum,
__private ShadowSample* a_pSam, __private float* a_pLightProbSel, __private int* a_pLightOffset)
{
const float4 f0 = a_in[a_tid + a_threadNum*0];
const float4 f1 = a_in[a_tid + a_threadNum*1];
const float4 f2 = a_in[a_tid + a_threadNum*2];
a_pSam->pos.x = f0.x; a_pSam->pos.y = f0.y;
a_pSam->pos.z = f0.z; a_pSam->pdf = fabs(f0.w); a_pSam->isPoint = (f0.w <= 0); // this is ok, if pdf is 0, it can be only point light
a_pSam->color.x = f1.x; a_pSam->color.y = f1.y;
a_pSam->color.z = f1.z; a_pSam->maxDist = f1.w;
a_pSam->cosAtLight = f2.x; (*a_pLightProbSel) = f2.y;
(*a_pLightOffset) = as_int(f2.z);
}
/**
\brief Per ray accumulated (for all bounces) data.
*/
typedef struct ALIGN_S(16) PerRayAccT
{
float pdfGTerm; ///< accumulated G term, equal to the product of G(x1,x2,x3) over all bounces; for the 3-way strategy we actually don't need it, so it is multiplied by -1 to store the first-bounce specular flag from the light direction.
float pdfLightWP; ///< accumulated probability per projected solid angle for light path
float pdfCameraWP; ///< accumulated probability per projected solid angle for camera path
float pdfCamA0; ///< equal to pdfWP[0]*G[0] (if [0] means light)
} PerRayAcc;
static inline PerRayAcc InitialPerParAcc()
{
PerRayAcc res;
res.pdfGTerm = 1.0f;
res.pdfLightWP = 1.0f;
res.pdfCameraWP = 1.0f;
res.pdfCamA0 = 1.0f;
return res;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
typedef struct SurfaceHitT
{
float3 pos;
float3 normal;
float3 flatNormal;
float3 tangent;
float3 biTangent;
float2 texCoord;
//float2 texCoord1;
float2 texCoordCamProj;
int matId;
float t;
float sRayOff;
bool hfi;
} SurfaceHit;
#define PV_PACK_VALID_FIELD 1
#define PV_PACK_WASSP_FIELD 2
#define PV_PACK_HITFI_FIELD 4 // Hit From Inside
#define PV_PACK_RCONN_FIELD 8 // Ready For Connect (pack this if the camera path doesn't hit a light but stores a camera vertex instead).
#define SURFACE_HIT_SIZE_IN_F4 4
static inline void WriteSurfaceHit(const __private SurfaceHit* a_pHit, int a_tid, int a_threadNum,
__global float4* a_out)
{
const float4 f1 = to_float4(a_pHit->pos, a_pHit->texCoord.x);
const float4 f2 = to_float4(a_pHit->normal, a_pHit->texCoord.y);
const float4 f3 = make_float4(as_float( encodeNormal(a_pHit->flatNormal)),
as_float( encodeNormal(a_pHit->tangent)),
as_float( encodeNormal(a_pHit->biTangent)),
as_float( a_pHit->matId)
);
// BPT doesn't strictly need (hit.t, hit.sRayOff), but they are packed into f4 below anyway
const int bit3 = a_pHit->hfi ? PV_PACK_HITFI_FIELD : 0;
const float4 f4 = make_float4(a_pHit->t, a_pHit->sRayOff, 0, as_float(bit3));
a_out[a_tid + 0*a_threadNum] = f1;
a_out[a_tid + 1*a_threadNum] = f2;
a_out[a_tid + 2*a_threadNum] = f3;
a_out[a_tid + 3*a_threadNum] = f4;
}
static inline void WriteSurfaceHitMatId(const int a_matId, int a_tid, int a_threadNum,
__global float4* a_out)
{
const float4 f3 = make_float4(0, 0, 0, as_float(a_matId));
a_out[a_tid + 2*a_threadNum] = f3;
}
static inline void ReadSurfaceHit(const __global float4* a_in, int a_tid, int a_threadNum,
__private SurfaceHit* a_pHit)
{
const float4 f1 = a_in[a_tid + 0*a_threadNum];
const float4 f2 = a_in[a_tid + 1*a_threadNum];
const float4 f3 = a_in[a_tid + 2*a_threadNum];
const float4 f4 = a_in[a_tid + 3*a_threadNum];
a_pHit->pos = to_float3 (f1); a_pHit->texCoord.x = f1.w;
a_pHit->normal = to_float3 (f2); a_pHit->texCoord.y = f2.w;
a_pHit->flatNormal = decodeNormal(as_int(f3.x));
a_pHit->tangent = decodeNormal(as_int(f3.y));
a_pHit->biTangent = decodeNormal(as_int(f3.z));
a_pHit->matId = as_int(f3.w);
a_pHit->t = f4.x;
a_pHit->sRayOff = f4.y;
const int flags = as_int(f4.w);
a_pHit->hfi = ((flags & PV_PACK_HITFI_FIELD) != 0);
}
static inline int ReadSurfaceHitMatId(const __global float4* a_in, int a_tid, int a_threadNum)
{
const float4 f3 = a_in[a_tid + 2*a_threadNum];
return as_int(f3.w);
}
static inline float3 ReadSurfaceHitPos(const __global float4* a_in, int a_tid, int a_threadNum)
{
return to_float3(a_in[a_tid + 0*a_threadNum]);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
enum PLAIN_MAT_TYPES {
PLAIN_MAT_CLASS_PHONG_SPECULAR = 0,
PLAIN_MAT_CLASS_BLINN_SPECULAR = 1, // Micro Facet Torrance Sparrow model with Blinn distribution
PLAIN_MAT_CLASS_PERFECT_MIRROR = 2,
PLAIN_MAT_CLASS_THIN_GLASS = 3,
PLAIN_MAT_CLASS_GLASS = 4,
PLAIN_MAT_CLASS_TRANSLUCENT = 5,
PLAIN_MAT_CLASS_SHADOW_MATTE = 6,
PLAIN_MAT_CLASS_LAMBERT = 7,
PLAIN_MAT_CLASS_OREN_NAYAR = 8,
PLAIN_MAT_CLASS_BLEND_MASK = 9,
PLAIN_MAT_CLASS_EMISSIVE = 10,
PLAIN_MAT_CLASS_VOLUME_PERLIN = 11, // inactive currently
PLAIN_MAT_CLASS_SSS = 12, // inactive currently
PLAIN_MAT_CLASS_BECKMANN = 13, // anisotropic BRDF
PLAIN_MAT_CLASS_TRGGX = 14, // anisotropic BRDF
PLAIN_MAT_CLASS_GGX = 15, // isotropic and simple
};
enum PLAIN_MAT_FLAGS{
PLAIN_MATERIAL_IS_LIGHT = 1,
PLAIN_MATERIAL_CAST_CAUSTICS = 2,
PLAIN_MATERIAL_HAS_DIFFUSE = 4,
PLAIN_MATERIAL_HAS_TRANSPARENCY = 8,
PLAIN_MATERIAL_INVERT_NMAP_X = 16,
PLAIN_MATERIAL_INVERT_NMAP_Y = 32,
PLAIN_MATERIAL_INVERT_SWAP_NMAP_XY = 64,
PLAIN_MATERIAL_INVERT_HEIGHT = 128,
PLAIN_MATERIAL_SKIP_SHADOW = 256,
PLAIN_MATERIAL_FORBID_EMISSIVE_GI = 512,
PLAIN_MATERIAL_SKIP_SKY_PORTAL = 1024,
PLAIN_MATERIAL_EMISSION_FALOFF = 2048,
// This flag marks node as a real blend of different materials.
// It used for blending emissive properties and normal maps.
//
PLAIN_MATERIAL_SURFACE_BLEND = 4096,
PLAIN_MATERIAL_HAVE_BTDF = 8192,
PLAIN_MATERIAL_INVIS_LIGHT = 16384,
PLAIN_MATERIAL_CAN_SAMPLE_REFL_ONLY = 32768,
PLAIN_MATERIAL_HAVE_PROC_TEXTURES = 32768*2,
PLAIN_MATERIAL_LOCAL_AO1 = 32768*4,
PLAIN_MATERIAL_LOCAL_AO2 = 32768*8,
PLAIN_MATERIAL_CAMERA_MAPPED_REFL = 32768*16,
PLAIN_MATERIAL_EMISSIVE_SHADOW_CATCHER = 32768*32,
PLAIN_MATERIAL_CATCHER_FIX_BLACK_TRIANGLES = 32768*64,
PLAIN_MATERIAL_FLIP_TANGENT = 32768*128,
PLAIN_MATERIAL_ENERGY_FIX_OR_MULTISCATTER = 32768*256,
};
#define PLAIN_MATERIAL_DATA_SIZE 192
#define PLAIN_MATERIAL_CUSTOM_DATA_SIZE 80
#define MIX_TREE_MAX_DEEP 7
struct PlainMaterialT
{
float data[PLAIN_MATERIAL_DATA_SIZE];
};
typedef struct PlainMaterialT PlainMaterial;
// emissive component, always present in material to speed-up code
//
#define EMISSIVE_COLORX_OFFSET 4
#define EMISSIVE_COLORY_OFFSET 5
#define EMISSIVE_COLORZ_OFFSET 6
#define EMISSIVE_TEXID_OFFSET 7
#define EMISSIVE_TEXMATRIXID_OFFSET 8
#define EMISSIVE_LIGHTID_OFFSET 9
#define OPACITY_TEX_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+1)
#define OPACITY_TEX_MATRIX (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+2)
#define NORMAL_TEX_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+3)
#define NORMAL_TEX_MATRIX (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+4)
#define EMISSIVE_BLEND_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+5)
#define PARALLAX_HEIGHT (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+6)
#define EMISSIVE_SAMPLER_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+8)
#define NORMAL_SAMPLER_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+20)
#define OPACITY_SAMPLER_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+32)
// #define PROC_TEX1_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+44) // FREE SLOT!
// #define PROC_TEX2_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+45) // FREE SLOT!
// #define PROC_TEX3_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+46) // FREE SLOT!
// #define PROC_TEX4_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+47) // FREE SLOT!
// #define PROC_TEX5_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+48) // FREE SLOT!
#define PROC_TEX_TABLE_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+49)
#define PROC_TEX_AO_TYPE (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+50)
#define PROC_TEX_AO_SAMPLER (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+52)
#define PROC_TEX_TEX_ID (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+64)
#define PROC_TEXMATRIX_ID (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+65)
#define PROC_TEX_AO_LENGTH (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+66)
#define PROC_TEX_AO_TYPE2 (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+67)
#define PROC_TEX_AO_SAMPLER2 (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+68)
#define PROC_TEX_TEX_ID2 (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+80)
#define PROC_TEXMATRIX_ID2 (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+81)
#define PROC_TEX_AO_LENGTH2 (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+82)
#define PROC_TEX1_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+83)
#define PROC_TEXN_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+99)
enum AO_TYPES { AO_TYPE_NONE = 0, AO_TYPE_UP = 1, AO_TYPE_DOWN = 2, AO_TYPE_BOTH = 4 };
#define PLAIN_MAT_TYPE_OFFSET 0
#define PLAIN_MAT_FLAGS_OFFSET 1
#define PLAIN_MAT_COMPONENTS_OFFSET 2
static inline int materialGetType (__global const PlainMaterial* a_pMat) { return as_int(a_pMat->data[PLAIN_MAT_TYPE_OFFSET]); }
static inline int materialGetFlags (__global const PlainMaterial* a_pMat) { return as_int(a_pMat->data[PLAIN_MAT_FLAGS_OFFSET]); }
static inline bool materialCastCaustics (__global const PlainMaterial* a_pMat) { return (as_int(a_pMat->data[PLAIN_MAT_FLAGS_OFFSET]) & PLAIN_MATERIAL_CAST_CAUSTICS) != 0; }
static inline bool materialHasTransparency (__global const PlainMaterial* a_pMat) { return (as_int(a_pMat->data[PLAIN_MAT_FLAGS_OFFSET]) & PLAIN_MATERIAL_HAS_TRANSPARENCY) != 0; }
static inline bool materialIsSkyPortal (__global const PlainMaterial* a_pMat) { return (as_int(a_pMat->data[PLAIN_MAT_FLAGS_OFFSET]) & PLAIN_MATERIAL_SKIP_SKY_PORTAL) != 0; }
static inline bool materialIsInvisLight (__global const PlainMaterial* a_pMat) { return (as_int(a_pMat->data[PLAIN_MAT_FLAGS_OFFSET]) & PLAIN_MATERIAL_INVIS_LIGHT) != 0; }
static inline void PutProcTexturesIdListToMaterialHead(const ProcTextureList* a_pData, PlainMaterial* a_pMat)
{
for(int i=0;i<a_pData->currMaxProcTex;i++)
((int*)(a_pMat->data))[PROC_TEX1_F4_HEAD_OFFSET + i] = a_pData->id_f4[i];
for(int i=a_pData->currMaxProcTex; i<MAXPROCTEX; i++)
((int*)(a_pMat->data))[PROC_TEX1_F4_HEAD_OFFSET + i] = INVALID_TEXTURE;
}
static inline void GetProcTexturesIdListFromMaterialHead(__global const PlainMaterial* a_pMat, __private ProcTextureList* a_pData)
{
int currMaxProcTex;
for(currMaxProcTex = 0; currMaxProcTex < MAXPROCTEX; currMaxProcTex++)
{
const int texId = as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET + currMaxProcTex]);
a_pData->id_f4[currMaxProcTex] = texId;
if(texId == INVALID_TEXTURE)
break;
}
a_pData->currMaxProcTex = currMaxProcTex;
}
static inline bool materialHeadHaveTargetProcTex(__global const PlainMaterial* a_pMat, int a_texId)
{
return (as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+0]) == a_texId ||
as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+1]) == a_texId ||
as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+2]) == a_texId ||
as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+3]) == a_texId ||
as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+4]) == a_texId ||
as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+5]) == a_texId ||
as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+6]) == a_texId ||
as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+7]) == a_texId ||
as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+8]) == a_texId ||
as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+9]) == a_texId ||
as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+10]) == a_texId ||
as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+11]) == a_texId ||
as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+12]) == a_texId ||
as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+13]) == a_texId ||
as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+14]) == a_texId ||
as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+15]) == a_texId);
}
static inline bool MaterialHaveAtLeastOneProcTex(__global const PlainMaterial* a_pMat)
{
return as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET]) != INVALID_TEXTURE;
}
static inline bool MaterialHaveAO(__global const PlainMaterial* a_pMat)
{
return as_int(a_pMat->data[PROC_TEX_AO_TYPE]) != AO_TYPE_NONE;
}
static inline bool MaterialHaveAO2(__global const PlainMaterial* a_pMat)
{
return as_int(a_pMat->data[PROC_TEX_AO_TYPE]) != AO_TYPE_NONE && as_int(a_pMat->data[PROC_TEX_AO_TYPE2]) != AO_TYPE_NONE;
}
#define EVAL_FLAG_DEFAULT 0
#define EVAL_FLAG_DISABLE_CAUSTICS 1
#define EVAL_FLAG_FWD_DIR 2
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
\brief Select an index proportionally to the piecewise constant function stored in a_accum[0 .. N-2]; binary search version.
\param a_r - input random variable in range [0, 1]
\param a_accum - input float array; it must be the result of a prefix sum, i.e. it must be non-decreasing.
\param N - size of the extended array, i.e. a_accum[N-1] == sum(a_accum[0 .. N-2]).
\param pPDF - out parameter. probability of picking up found value.
\return found index
*/
static int SelectIndexPropToOpt(const float a_r, __global const float* a_accum, const int N,
__private float* pPDF)
{
int leftBound = 0;
int rightBound = N - 2; // because a_accum[N-1] == sum(a_accum[0 .. N-2]).
int counter = 0;
int currPos = -1;
const int maxStep = 50;
const float x = a_r*a_accum[N - 1];
while (rightBound - leftBound > 1 && counter < maxStep)
{
const int currSize = rightBound + leftBound;
const int currPos1 = (currSize % 2 == 0) ? (currSize + 1) / 2 : (currSize + 0) / 2;
const float a = a_accum[currPos1 + 0];
const float b = a_accum[currPos1 + 1];
if (a < x && x <= b)
{
currPos = currPos1;
break;
}
else if (x <= a)
rightBound = currPos1;
else if (x > b)
leftBound = currPos1;
counter++;
}
if (currPos < 0) // check the rest intervals
{
const float a1 = a_accum[leftBound + 0];
const float b1 = a_accum[leftBound + 1];
const float a2 = a_accum[rightBound + 0];
const float b2 = a_accum[rightBound + 1];
if (a1 < x && x <= b1)
currPos = leftBound;
if (a2 < x && x <= b2)
currPos = rightBound;
}
if (x == 0.0f)
currPos = 0;
else if (currPos < 0)
currPos = (rightBound + leftBound + 1) / 2;
(*pPDF) = (a_accum[currPos + 1] - a_accum[currPos]) / a_accum[N - 1];
return currPos;
}
/**
\brief search for the lower bound (left range)
\param a - array
\param length - array size
\param left_range - value to search for
*/
static inline int binarySearchForLeftRange(__global const int2* a, int length, int left_range)
{
if (a[length - 1].x < left_range)
return -1;
int low = 0;
int high = length - 1;
while (low <= high)
{
int mid = low + ((high - low) / 2);
if (a[mid].x >= left_range)
high = mid - 1;
else //if(a[mid]<i)
low = mid + 1;
}
return high + 1;
}
/**
\brief search for the upper bound (right range)
\param a - array
\param length - array size
\param right_range - value to search for
*/
static inline int binarySearchForRightRange(__global const int2* a, int length, int right_range)
{
if (a[0].x > right_range)
return -1;
int low = 0;
int high = length - 1;
while (low <= high)
{
int mid = low + ((high - low) / 2);
if (a[mid].x > right_range)
high = mid - 1;
else //if(a[mid]<i)
low = mid + 1;
}
return low - 1;
}
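// Usage sketch (illustrative): for an array 'a' of pairs sorted by .x, the two
// searches bracket all entries whose key lies in [lo, hi]:
// const int begin = binarySearchForLeftRange (a, length, lo);
// const int end = binarySearchForRightRange(a, length, hi);
// if (begin != -1 && end != -1 && begin <= end) { /* visit a[begin .. end] */ }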
/**
\brief perform material id remap for instanced objects;
\param a_mId - input old material id
\param a_instId - input instance id
\param in_remapInst - array/table that maps instance id to remap list id
\param a_instTabSize - max instance id / size of the 'in_remapInst' array
\param in_allMatRemapLists - all remap lists packed into a single array
\param in_remapTable - array/table that stores the offset inside 'in_allMatRemapLists' for each remap list whose id we got from 'in_remapInst'
\param a_remapTableSize - size of the 'in_remapTable' array
\return new material id
*/
static inline int remapMaterialId(int a_mId, int a_instId,
__global const int* in_remapInst, int a_instTabSize,
__global const int* in_allMatRemapLists,
__global const int2* in_remapTable, int a_remapTableSize)
{
if (a_mId < 0 || a_instId < 0 || a_instId >= a_instTabSize || in_remapInst == 0 || in_allMatRemapLists == 0 || in_remapTable == 0)
return a_mId;
const int remapListId = in_remapInst[a_instId];
if(remapListId < 0 || remapListId >= a_remapTableSize) // || remapListId >= some size
return a_mId;
const int2 offsAndSize = in_remapTable[remapListId];
// int res = a_mId;
// for (int i = 0; i < offsAndSize.y; i++) // #TODO: change to binary search
// {
// int idRemapFrom = in_allMatRemapLists[offsAndSize.x + i * 2 + 0];
// int idRemapTo = in_allMatRemapLists[offsAndSize.x + i * 2 + 1];
//
// if (idRemapFrom == a_mId)
// {
// res = idRemapTo;
// break;
// }
// }
int low = 0;
int high = offsAndSize.y - 1;
while (low <= high)
{
const int mid = low + ((high - low) / 2);
const int idRemapFrom = in_allMatRemapLists[offsAndSize.x + mid * 2 + 0];
if (idRemapFrom >= a_mId)
high = mid - 1;
else //if(a[mid]<i)
low = mid + 1;
}
if (high+1 < offsAndSize.y)
{
const int idRemapFrom = in_allMatRemapLists[offsAndSize.x + (high + 1) * 2 + 0];
const int idRemapTo = in_allMatRemapLists[offsAndSize.x + (high + 1) * 2 + 1];
const int res = (idRemapFrom == a_mId) ? idRemapTo : a_mId;
return res;
}
else
return a_mId;
}
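// Note added for clarity: each remap list is assumed to be stored as (idFrom,
// idTo) pairs packed flat into 'in_allMatRemapLists' and sorted by idFrom; the
// loop above is a lower-bound binary search over idFrom, equivalent to the
// commented-out linear scan but O(log n) per lookup.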
#define AO_RAYS_PACKED 4
static inline ushort4 compressShadow(float3 shadow)
{
ushort4 shadowCompressed;
shadowCompressed.x = (ushort)(65535.0f * shadow.x);
shadowCompressed.y = (ushort)(65535.0f * shadow.y);
shadowCompressed.z = (ushort)(65535.0f * shadow.z);
shadowCompressed.w = 0;
return shadowCompressed;
}
static inline float3 decompressShadow(ushort4 shadowCompressed)
{
const float invNormCoeff = 1.0f / 65535.0f;
return invNormCoeff*make_float3((float)shadowCompressed.x, (float)shadowCompressed.y, (float)shadowCompressed.z);
}
#define SPLIT_DL_BY_GRAMMAR true
//#define SBDPT_DEBUG_SPLIT 0
//#define SBDPT_DEBUG_DEPTH 4
//#define SBDPT_CHECK_BOUNCE 4
//#define SBDPT_INDIRECT_ONLY (void)
static inline float WrapVal(float a_val)
{
if (a_val > 1.0f)
return a_val - (float)((int)a_val);
else if(a_val < -1.0f)
return (float)((int)a_val) - a_val;
else
return a_val;
}
#endif
|
kmean_par.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include "immintrin.h"
#include "pack.h"
#include "kernel.h"
#include <string.h>
#include "rdtsc.h"
//#define DEBUG 1
//#define KERNEL 1
#define KERNEL_P_SIZE 8
#define KERNEL_D_SIZE 4
#define KERNEL_C_SIZE 4
int runs = 1000;
int psize; // number of points
int dsize; // number of dimensions
int k; // number of centers
int iterations;
int t_num;
int main(int argc, char *argv[])
{
if (argc != 6)
{
printf("Wrong arguments! Usage: iterations k dsize psize t_num\n");
return 1;
}
iterations = atoi(argv[1]);
k = atoi(argv[2]);
dsize = atoi(argv[3]);
psize = atoi(argv[4]);
t_num = atoi(argv[5]);
double *points;
double *centers;
double *packed_points;
double *packed_centers;
double *new_centers;
int *new_centers_size;
//double *final_labels;
posix_memalign((void **)&points, 64, dsize * psize * sizeof(double));
posix_memalign((void **)¢ers, 64, k * dsize * sizeof(double));
posix_memalign((void **)&packed_points, 64, dsize * psize * sizeof(double));
posix_memalign((void **)&packed_centers, 64, dsize * psize * sizeof(double));
posix_memalign((void **)&new_centers, 64, k * dsize * sizeof(double));
posix_memalign((void **)&new_centers_size, 64, k * sizeof(int));
//posix_memalign((void **)&final_labels, 64, psize * sizeof(double));
// initialize points and centers
read_points_and_centers(points, centers, dsize, psize, k);
// pack the data into panel order
pack_points(points, packed_points, dsize, psize);
memcpy(new_centers, centers, k * dsize * sizeof(double));
unsigned long long t0, t1;
unsigned long long sum1 = 0;
// kmeans algorithm
for (int r = 0; r < runs; ++r) {
t0 = rdtsc();
for (int i = 0; i < iterations; ++i)
{
// initialize new centers to 0
pack_centers(new_centers, packed_centers, dsize, k);
memset(new_centers, 0, k * dsize * sizeof(double));
memset(new_centers_size, 0, k * sizeof(int));
int p_loop = psize / KERNEL_P_SIZE;
#pragma omp parallel for num_threads(t_num)
for (int p_i = 0; p_i < p_loop; ++p_i)
{
// set the proper initial values
// 8 registers reserved for intermediate distance result
int p = p_i * KERNEL_P_SIZE;
__m256d ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7;
double tmp_cluster_arr_g1[4] = {0.0, 0.0, 0.0, 0.0};
double tmp_min_dist_arr_g1[4] = {DBL_MAX, DBL_MAX, DBL_MAX, DBL_MAX};
double tmp_cluster_arr_g2[4] = {0.0, 0.0, 0.0, 0.0};
double tmp_min_dist_arr_g2[4] = {DBL_MAX, DBL_MAX, DBL_MAX, DBL_MAX};
for (int c = 0; c < k; c += KERNEL_C_SIZE) // 4 centers computed in each kernel
{
// 8 registers to store intermediate distances
ymm0 = _mm256_setzero_pd();
ymm1 = _mm256_setzero_pd();
ymm2 = _mm256_setzero_pd();
ymm3 = _mm256_setzero_pd();
ymm4 = _mm256_setzero_pd();
ymm5 = _mm256_setzero_pd();
ymm6 = _mm256_setzero_pd();
ymm7 = _mm256_setzero_pd();
				for (int d = 0; d < dsize; d += KERNEL_D_SIZE) // 4 dimensions per kernel step
{
					// apply the kernel to an 8 x 4 block of points and a 4 x 4 block of centers
kernel(KERNEL_D_SIZE, KERNEL_P_SIZE, &ymm0, &ymm1, &ymm2, &ymm3, &ymm4, &ymm5, &ymm6, &ymm7,
packed_points + p * dsize + d * KERNEL_P_SIZE, packed_centers + c * dsize + d * KERNEL_C_SIZE);
}
findCluster(&ymm0, &ymm1, &ymm2, &ymm3, &ymm4, &ymm5, &ymm6, &ymm7,
tmp_cluster_arr_g1, tmp_min_dist_arr_g1, tmp_cluster_arr_g2, tmp_min_dist_arr_g2, c * 1.0);
}
// update the cluster
#pragma omp critical
{
updateCluster(tmp_cluster_arr_g1, tmp_cluster_arr_g2, points + p * dsize, new_centers, psize, dsize, new_centers_size);
}
}
// Compute the new centers
computeNewCluster(new_centers, new_centers_size, dsize, k);
}
t1 = rdtsc();
sum1 += (t1 - t0);
}
// print new centers
for (int m = 0; m < dsize * k; m++) {
printf("%lf ", new_centers[m]);
if (m % dsize == dsize - 1) {
printf("\n");
}
}
printf("Execution cycles: %lld, times: %lf seconds, FLOPS/cycle: %lf\n",
sum1, (double) sum1 / (double) PROCESSOR_FREQ,
(double) (iterations * (psize * k * dsize * 3 + 2 * psize * k + psize * dsize)) / ((double) (sum1 / runs)));
}
|
pi.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define quantity 10000000
/* To calculate pi, we use the integral from 0 to 1 of 4/(1 + x^2) dx = pi.
   Numerically, this is approximated by the midpoint rule: the sum over i
   from 1 to N of h * 4/(1 + [(i - 1/2) h]^2), with h = 1/N.
*/
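/* Worked restatement of the quadrature below (no new assumptions): with
   h = 1/N and midpoints x_i = (i - 1/2)*h,

       pi ~ h * sum_{i=1..N} 4/(1 + x_i^2)

   so each loop iteration accumulates one midpoint sample f(x_i) into `sum`,
   and the final multiplication by dx (= h) applies the quadrature weight. */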
int main()
{
long int i, n = quantity;
double x, dx, f, sum, pi;
printf("Number of intervals: %ld\n", n);
sum=0.0;
dx = 1.0/(double)n;
/*
The for construct specifies that the iterations of the contained loop
should be distributed across team threads
*/
#pragma omp parallel for private (x, f, i) shared(dx, n) reduction(+:sum)
for(i=1; i<=n; i++)
{
x = dx * ((double)(i - 0.5));
f = 4.0 / (1.0+x*x);
sum+=f;
}
pi=dx*sum;
printf("PI %.24f\n", pi);
return 0;
} |
sparse_tsne_user_def_probabilities_inl.h | /*
*
* Copyright (c) 2014, Nicola Pezzotti (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY NICOLA PEZZOTTI ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
#ifndef SPARSE_TSNE_USER_DEF_PROBABILITIES_INL
#define SPARSE_TSNE_USER_DEF_PROBABILITIES_INL
#include "hdi/dimensionality_reduction/sparse_tsne_user_def_probabilities.h"
#include "hdi/utils/math_utils.h"
#include "hdi/utils/log_helper_functions.h"
#include "hdi/utils/scoped_timers.h"
#include "sptree.h"
#include <random>
#ifdef __USE_GCD__
#include <dispatch/dispatch.h>
#endif
#pragma warning( push )
#pragma warning( disable : 4267)
#pragma warning( push )
#pragma warning( disable : 4291)
#pragma warning( push )
#pragma warning( disable : 4996)
#pragma warning( push )
#pragma warning( disable : 4018)
#pragma warning( push )
#pragma warning( disable : 4244)
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
namespace hdi{
namespace dr{
template <typename scalar, typename sparse_scalar_matrix>
SparseTSNEUserDefProbabilities<scalar, sparse_scalar_matrix>::SparseTSNEUserDefProbabilities():
_initialized(false),
_logger(nullptr),
_theta(0),
_exaggeration_baseline(1)
{
}
template <typename scalar, typename sparse_scalar_matrix>
void SparseTSNEUserDefProbabilities<scalar, sparse_scalar_matrix>::reset(){
_initialized = false;
}
template <typename scalar, typename sparse_scalar_matrix>
void SparseTSNEUserDefProbabilities<scalar, sparse_scalar_matrix>::clear(){
_embedding->clear();
_initialized = false;
}
template <typename scalar, typename sparse_scalar_matrix>
void SparseTSNEUserDefProbabilities<scalar, sparse_scalar_matrix>::getEmbeddingPosition(scalar_vector_type& embedding_position, data_handle_type handle)const{
if(!_initialized){
throw std::logic_error("Algorithm must be initialized before ");
}
embedding_position.resize(_params._embedding_dimensionality);
for(int i = 0; i < _params._embedding_dimensionality; ++i){
      embedding_position[i] = (*_embedding_container)[handle*_params._embedding_dimensionality + i];
}
}
/////////////////////////////////////////////////////////////////////////
template <typename scalar, typename sparse_scalar_matrix>
void SparseTSNEUserDefProbabilities<scalar, sparse_scalar_matrix>::initialize(const sparse_scalar_matrix& probabilities, data::Embedding<scalar_type>* embedding, TsneParameters params){
utils::secureLog(_logger,"Initializing tSNE...");
{//Aux data
_params = params;
unsigned int size = probabilities.size();
unsigned int size_sq = probabilities.size()*probabilities.size();
_embedding = embedding;
_embedding_container = &(embedding->getContainer());
_embedding->resize(_params._embedding_dimensionality,size);
_P.resize(size);
_Q.resize(size_sq);
_gradient.resize(size*params._embedding_dimensionality,0);
_previous_gradient.resize(size*params._embedding_dimensionality,0);
_gain.resize(size*params._embedding_dimensionality,1);
}
utils::secureLogValue(_logger,"Number of data points",_P.size());
computeHighDimensionalDistribution(probabilities);
initializeEmbeddingPosition(params._seed, params._rngRange);
_iteration = 0;
_initialized = true;
utils::secureLog(_logger,"Initialization complete!");
}
template <typename scalar, typename sparse_scalar_matrix>
void SparseTSNEUserDefProbabilities<scalar, sparse_scalar_matrix>::initializeWithJointProbabilityDistribution(const sparse_scalar_matrix& distribution, data::Embedding<scalar_type>* embedding, TsneParameters params){
utils::secureLog(_logger,"Initializing tSNE with a user-defined joint-probability distribution...");
{//Aux data
_params = params;
unsigned int size = distribution.size();
unsigned int size_sq = distribution.size()*distribution.size();
_embedding = embedding;
_embedding_container = &(embedding->getContainer());
_embedding->resize(_params._embedding_dimensionality,size);
_P.resize(size);
_Q.resize(size_sq);
_gradient.resize(size*params._embedding_dimensionality,0);
_previous_gradient.resize(size*params._embedding_dimensionality,0);
_gain.resize(size*params._embedding_dimensionality,1);
}
utils::secureLogValue(_logger,"Number of data points",_P.size());
_P = distribution;
initializeEmbeddingPosition(params._seed, params._rngRange);
_iteration = 0;
_initialized = true;
utils::secureLog(_logger,"Initialization complete!");
}
template <typename scalar, typename sparse_scalar_matrix>
void SparseTSNEUserDefProbabilities<scalar, sparse_scalar_matrix>::computeHighDimensionalDistribution(const sparse_scalar_matrix& probabilities){
utils::secureLog(_logger,"Computing high-dimensional joint probability distribution...");
const int n = getNumberOfDataPoints();
    //Can be improved by using the symmetry of the matrix (half the memory) //TODO
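    // The loop below computes the symmetrized joint probability (restating the
    // code, no new assumptions):
    //   _P[j][i] = _P[i][j] = (p_{i|j} + p_{j|i}) / 2,
    // where a conditional probability absent from the sparse matrix counts as 0.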
for(int j = 0; j < n; ++j){
for(auto& elem: probabilities[j]){
scalar_type v0 = elem.second;
auto iter = probabilities[elem.first].find(j);
scalar_type v1 = 0.;
if(iter != probabilities[elem.first].end())
v1 = iter->second;
_P[j][elem.first] = static_cast<scalar_type>((v0+v1)*0.5);
_P[elem.first][j] = static_cast<scalar_type>((v0+v1)*0.5);
}
}
}
template <typename scalar, typename sparse_scalar_matrix>
void SparseTSNEUserDefProbabilities<scalar, sparse_scalar_matrix>::initializeEmbeddingPosition(int seed, double multiplier){
utils::secureLog(_logger,"Initializing the embedding...");
if(seed < 0){
std::srand(static_cast<unsigned int>(time(NULL)));
}
else{
std::srand(seed);
}
for (int i = 0; i < _embedding->numDataPoints(); ++i) {
double x(0.);
double y(0.);
double radius(0.);
do {
x = 2 * (rand() / ((double)RAND_MAX + 1)) - 1;
y = 2 * (rand() / ((double)RAND_MAX + 1)) - 1;
radius = (x * x) + (y * y);
} while((radius >= 1.0) || (radius == 0.0));
radius = sqrt(-2 * log(radius) / radius);
x *= radius * multiplier;
y *= radius * multiplier;
_embedding->dataAt(i, 0) = x;
_embedding->dataAt(i, 1) = y;
}
}
template <typename scalar, typename sparse_scalar_matrix>
void SparseTSNEUserDefProbabilities<scalar, sparse_scalar_matrix>::doAnIteration(double mult){
if(!_initialized){
throw std::logic_error("Cannot compute a gradient descent iteration on unitialized data");
}
if(_iteration == _params._mom_switching_iter){
utils::secureLog(_logger,"Switch to final momentum...");
}
if(_iteration == _params._remove_exaggeration_iter){
utils::secureLog(_logger,"Remove exaggeration...");
}
if(_theta == 0){
doAnIterationExact(mult);
}else{
doAnIterationBarnesHut(mult);
}
}
template <typename scalar, typename sparse_scalar_matrix>
scalar SparseTSNEUserDefProbabilities<scalar, sparse_scalar_matrix>::exaggerationFactor(){
scalar_type exaggeration = _exaggeration_baseline;
if(_iteration <= _params._remove_exaggeration_iter){
exaggeration = _params._exaggeration_factor;
}else if(_iteration <= (_params._remove_exaggeration_iter + _params._exponential_decay_iter)){
//double decay = std::exp(-scalar_type(_iteration-_params._remove_exaggeration_iter)/30.);
double decay = 1. - double(_iteration-_params._remove_exaggeration_iter)/_params._exponential_decay_iter;
exaggeration = _exaggeration_baseline + (_params._exaggeration_factor-_exaggeration_baseline)*decay;
//utils::secureLogValue(_logger,"Exaggeration decay...",exaggeration);
}
return exaggeration;
}
template <typename scalar, typename sparse_scalar_matrix>
void SparseTSNEUserDefProbabilities<scalar, sparse_scalar_matrix>::doAnIterationExact(double mult){
//Compute Low-dimensional distribution
computeLowDimensionalDistribution();
//Compute gradient of the KL function
computeExactGradient(exaggerationFactor());
//Update the embedding based on the gradient
updateTheEmbedding(mult);
}
template <typename scalar, typename sparse_scalar_matrix>
void SparseTSNEUserDefProbabilities<scalar, sparse_scalar_matrix>::doAnIterationBarnesHut(double mult){
//Compute gradient of the KL function using the Barnes Hut approximation
computeBarnesHutGradient(exaggerationFactor());
//Update the embedding based on the gradient
    updateTheEmbedding(mult);
}
template <typename scalar, typename sparse_scalar_matrix>
void SparseTSNEUserDefProbabilities<scalar, sparse_scalar_matrix>::computeLowDimensionalDistribution(){
const int n = getNumberOfDataPoints();
#ifdef __USE_GCD__
//std::cout << "GCD dispatch, sparse_tsne_user_def_probabilities 285.\n";
dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
#pragma omp parallel for
for(int j = 0; j < n; ++j){
#endif //__USE_GCD__
_Q[j*n + j] = 0;
for(int i = j+1; i < n; ++i){
const double euclidean_dist_sq(
utils::euclideanDistanceSquared<scalar_type>(
(*_embedding_container).begin()+j*_params._embedding_dimensionality,
(*_embedding_container).begin()+(j+1)*_params._embedding_dimensionality,
(*_embedding_container).begin()+i*_params._embedding_dimensionality,
(*_embedding_container).begin()+(i+1)*_params._embedding_dimensionality
)
);
const double v = 1./(1.+euclidean_dist_sq);
_Q[j*n + i] = static_cast<scalar_type>(v);
_Q[i*n + j] = static_cast<scalar_type>(v);
}
}
#ifdef __USE_GCD__
);
#endif // __USE_GCD__
double sum_Q = 0;
for(auto& v : _Q){
sum_Q += v;
}
_normalization_Q = static_cast<scalar_type>(sum_Q);
}
template <typename scalar, typename sparse_scalar_matrix>
void SparseTSNEUserDefProbabilities<scalar, sparse_scalar_matrix>::computeExactGradient(double exaggeration){
const int n = getNumberOfDataPoints();
const int dim = _params._embedding_dimensionality;
for(int i = 0; i < n; ++i){
for(int d = 0; d < dim; ++d){
_gradient[i * dim + d] = 0;
}
}
for(int i = 0; i < n; ++i){
for(int j = 0; j < n; ++j){
for(int d = 0; d < dim; ++d){
const int idx = i*n + j;
const double distance((*_embedding_container)[i * dim + d] - (*_embedding_container)[j * dim + d]);
const double negative(_Q[idx] * _Q[idx] / _normalization_Q * distance);
_gradient[i * dim + d] += static_cast<scalar_type>(-4*negative);
}
}
for(auto& elem: _P[i]){
for(int d = 0; d < dim; ++d){
const int j = elem.first;
const int idx = i*n + j;
const double distance((*_embedding_container)[i * dim + d] - (*_embedding_container)[j * dim + d]);
double p_ij = elem.second/n;
const double positive(p_ij * _Q[idx] * distance);
_gradient[i * dim + d] += static_cast<scalar_type>(4*exaggeration*positive);
}
}
}
}
template <typename scalar, typename sparse_scalar_matrix>
void SparseTSNEUserDefProbabilities<scalar, sparse_scalar_matrix>::computeBarnesHutGradient(double exaggeration){
typedef double hp_scalar_type;
SPTree<scalar_type> sptree(_params._embedding_dimensionality,_embedding->getContainer().data(),getNumberOfDataPoints());
scalar_type sum_Q = .0;
std::vector<hp_scalar_type> positive_forces(getNumberOfDataPoints()*_params._embedding_dimensionality);
/*__block*/ std::vector<hp_scalar_type> negative_forces(getNumberOfDataPoints()*_params._embedding_dimensionality);
sptree.computeEdgeForces(_P, exaggeration, positive_forces.data());
/*__block*/ std::vector<hp_scalar_type> sum_Q_subvalues(getNumberOfDataPoints(),0);
//#ifdef __USE_GCD__
// std::cout << "GCD dispatch, sparse_tsne_user_def_probabilities 365.\n";
// dispatch_apply(getNumberOfDataPoints(), dispatch_get_global_queue(0, 0), ^(size_t n) {
//#else
#pragma omp parallel for
for(int n = 0; n < getNumberOfDataPoints(); n++){
//#endif //__USE_GCD__
sptree.computeNonEdgeForcesOMP(n, _theta, negative_forces.data() + n * _params._embedding_dimensionality, sum_Q_subvalues[n]);
}
//#ifdef __USE_GCD__
// );
//#endif
sum_Q = 0;
for(int n = 0; n < getNumberOfDataPoints(); n++){
sum_Q += sum_Q_subvalues[n];
}
for(int i = 0; i < _gradient.size(); i++){
_gradient[i] = positive_forces[i] - (negative_forces[i] / sum_Q);
}
}
//temp
template <typename T>
T sign(T x) { return (x == .0 ? .0 : (x < .0 ? -1.0 : 1.0)); }
template <typename scalar, typename sparse_scalar_matrix>
void SparseTSNEUserDefProbabilities<scalar, sparse_scalar_matrix>::updateTheEmbedding(double mult){
for(int i = 0; i < _gradient.size(); ++i){
_gain[i] = static_cast<scalar_type>((sign(_gradient[i]) != sign(_previous_gradient[i])) ? (_gain[i] + .2) : (_gain[i] * .8));
if(_gain[i] < _params._minimum_gain){
_gain[i] = static_cast<scalar_type>(_params._minimum_gain);
}
_gradient[i] = static_cast<scalar_type>((_gradient[i]>0?1:-1)*std::abs(_gradient[i]*_params._eta* _gain[i])/(_params._eta*_gain[i]));
_previous_gradient[i] = static_cast<scalar_type>(((_iteration<_params._mom_switching_iter)?_params._momentum:_params._final_momentum) * _previous_gradient[i] - _params._eta * _gain[i] * _gradient[i]);
(*_embedding_container)[i] += static_cast<scalar_type>(_previous_gradient[i] * mult);
}
//MAGIC NUMBER
if(exaggerationFactor() > 1.2){
_embedding->scaleIfSmallerThan(0.1f);
}else{
_embedding->zeroCentered();
}
++_iteration;
}
template <typename scalar, typename sparse_scalar_matrix>
double SparseTSNEUserDefProbabilities<scalar, sparse_scalar_matrix>::computeKullbackLeiblerDivergence(){
assert(false);
return 0;
}
}
}
#endif
|
noble_1962.c | #include "noble_1962.h"
GET_CELL_MODEL_DATA(init_cell_model_data) {
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
// Original: -87,0.01,0.8,0.01
// Mestrado Lucas: -75.5344986658,0.0605467272,0.7259001355,0.4709239708
// 500ms pacing steady state: -76.1302f, 0.0586201f, 0.741082f, 0.475923f
// 450ms pacing steady-state: -78.8607f, 0.050476f, 0.806294f, 0.518928f
sv[0] = -75.5344986658f; // V millivolt
sv[1] = 0.0605467272f; // m dimensionless
sv[2] = 0.7259001355f; // h dimensionless
sv[3] = 0.4709239708f; // n dimensionless
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++)
{
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu(dt, rY, rDY, stim_current);
// Explicit Euler
for(int i = 0; i < NEQ; i++)
sv[i] = dt*rDY[i] + rY[i];
}
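/* Integrator note (restating the code, no new assumptions): the explicit
   Euler step above is y_{n+1} = y_n + dt*f(y_n). The commented-out
   Rush-Larsen branch in RHS_cpu below is the usual alternative for the
   gating variables m, h and n: since each obeys dy/dt = alpha*(1-y) - beta*y,
   it can be advanced exactly over one step as
       y_{n+1} = y_inf + (y_n - y_inf)*exp(-dt/tau),
   with y_inf = alpha/(alpha+beta) and tau = 1/(alpha+beta). */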
void RHS_cpu(const real dt, const real *sv, real *rDY_, real stim_current) {
//State variables
const real V_old_ = sv[0];
const real m_old_ = sv[1];
const real h_old_ = sv[2];
const real n_old_ = sv[3];
//___________________________________________________________________________
    //Parameters (milliseconds)
const real Cm = 12.0f; // (microF)
const real g_na_max = 400.0f; // (microS)
const real E_na = 40.0f; // (millivolt)
const real g_L = 0.075f; // (microS)
const real E_L = -60.0f; // (millivolt)
real calc_I_stim = stim_current;
// Algebraics
real g_na = pow(m_old_, 3.00000)*h_old_*g_na_max;
real alpha_m = (((1.0e-01*((-V_old_)-4.8e+01))/(exp((((-V_old_)-4.8e+01)/1.5e+01))-1.0e+00)));
real alpha_h = ((1.7e-01*exp((((-V_old_)-9.0e+01)/2.0e+01))));
real alpha_n = (((1.0e-04*((-V_old_)-5.0e+01))/(exp((((-V_old_)-5.0e+01)/1.0e+01))-1.0e+00)));
real i_na = (g_na+1.4e-01)*(V_old_ - E_na);
//real i_na_no_oscilation = (g_na+122.500)*(V_old_ - E_na);
real beta_m = (((1.2e-01*(V_old_+8.0e+00))/(exp(((V_old_+8.0e+00)/5.0e+00))-1.0e+00)));
real beta_h = ((1.0/(1.0e+00+exp((((-V_old_)-4.2e+01)/1.0e+01)))));
real beta_n = ((2.0e-03*exp((((-V_old_)-9.0e+01)/8.0e+01))));
//real g_K1 = 1.3f*exp((- V_old_ - 90.0000)/50.0000)+ 0.015f*exp((V_old_+90.0000)/60.0000);
real g_K1 = (((1.2*exp((((-V_old_)-9.0e+01)/5.0e+01)))+(1.5e-02*exp(((V_old_+9.0e+01)/6.0e+01)))));
real g_K2 = 1.2f*pow(n_old_, 4.00000);
real i_k = (g_K1+g_K2)*(V_old_+100.000);
real i_leak = g_L*(V_old_ - E_L);
// Rates
rDY_[0] = ( - (i_na + i_k + i_leak + calc_I_stim)) / Cm;
//rDY_[0] = (- (i_na_no_oscilation + i_k + i_leak + calc_I_stim)/Cm) * 1.0E-03;
// Rush-Larsen
//real m_inf = alpha_m / (alpha_m + beta_m);
//real tau_m = 1.0 / (alpha_m + beta_m);
//rDY_[1] = m_inf + ((m_old_ - m_inf)*exp(-dt/tau_m));
//real h_inf = alpha_h / (alpha_h + beta_h);
//real tau_h = 1.0 / (alpha_h + beta_h);
//rDY_[2] = h_inf + ((h_old_ - h_inf)*exp(-dt/tau_h));
//real n_inf = alpha_n / (alpha_n + beta_n);
//real tau_n = 1.0 / (alpha_n + beta_n);
//rDY_[3] = n_inf + ((n_old_ - n_inf)*exp(-dt/tau_n));
// Old Euler code ...
rDY_[1] = (alpha_m*(1.00000 - m_old_) - (beta_m*m_old_) );
rDY_[2] = (alpha_h*(1.00000 - h_old_) - (beta_h*h_old_) );
rDY_[3] = (alpha_n*(1.00000 - n_old_) - (beta_n*n_old_) );
} |
GB_unop__identity_uint8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint8_int8
// op(A') function: GB_unop_tran__identity_uint8_int8
// C type: uint8_t
// A type: int8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = (uint8_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_uint8_int8
(
uint8_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
uint8_t z = (uint8_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int8_t aij = Ax [p] ;
uint8_t z = (uint8_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_uint8_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_uint8_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint8_uint32)
// op(A') function: GB (_unop_tran__identity_uint8_uint32)
// C type: uint8_t
// A type: uint32_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = (uint8_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint8_uint32)
(
uint8_t *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
uint8_t z = (uint8_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint32_t aij = Ax [p] ;
uint8_t z = (uint8_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint8_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
  if (argc > 4) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
    Nt = atoi(argv[4]);
  } else {
    // all four sizes are required; fail fast rather than read Nx..Nt uninitialized
    printf("Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 8;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
    // serial execution - Additions: 6 && Multiplications: 7 per stencil point
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
GB_binop__lt_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__lt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__lt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_fp64)
// A*D function (colscale): GB (_AxD__lt_fp64)
// D*A function (rowscale): GB (_DxB__lt_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_fp64)
// C=scalar+B GB (_bind1st__lt_fp64)
// C=scalar+B' GB (_bind1st_tran__lt_fp64)
// C=A+scalar GB (_bind2nd__lt_fp64)
// C=A'+scalar GB (_bind2nd_tran__lt_fp64)
// C type: bool
// A type: double
// B,b type: double
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_FP64 || GxB_NO_LT_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lt_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__lt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lt_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lt_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lt_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = Ax [p] ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__lt_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
fib.c | #include <stdio.h>
#define N 42
long fib(long n) {
long i, j;
if (n<2)
return n;
else if (n < 30) {
return fib(n-1) + fib (n-2);
}
else {
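    // above the serial cutoff, each recursive call becomes an OpenMP task;
    // shared(i)/shared(j) make the children's results visible after taskwait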
#pragma omp task shared(i)
i=fib(n-1);
#pragma omp task shared(j)
j=fib(n-2);
#pragma omp taskwait
return i+j;
}
}
int main() {
#pragma omp parallel
#pragma omp single
printf("\nFibonacci(%lu) = %lu\n",(long)N,fib((long)N));
}
|
parallel_push_relabel.h | /*
* Implementation of Goldberg-Tarjan's parallel push-relabel algorithm. Description can be found in
* Goldberg, Andrew and Tarjan, Robert, A New Approach to the Maximum-Flow Problem, J. ACM, 1988.
*
* This implementation is also based on detailed pseudocode presented in
* Baumstark, Niklas, Speeding up Maximum Flow Computations on Shared-Memory Platforms, KIT, Karlsruhe, 2014.
*/
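/*
 * A minimal usage sketch (not part of the original header; `g`, `n` and the
 * chosen template arguments are illustrative assumptions):
 *
 *   // adjacency list: vector<vector<cached_edge<uint32_t, uint64_t>>> g
 *   parallel_push_relabel::max_flow_instance<std::vector, uint32_t, uint64_t>
 *       solver ( g, 0, n - 1 );               // source 0, sink n-1
 *   uint64_t flow = solver . find_max_flow ();
 *   solver . preflow_to_flow ();              // optional: convert preflow to a flow
 *   auto network = solver . steal_network (); // reclaim the residual network
 */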
#ifndef MAXFLOW_PARALLEL_PUSH_RELABEL_H
#define MAXFLOW_PARALLEL_PUSH_RELABEL_H
#include <memory>
#include <chrono>
#include <iostream>
#include <atomic>
#include <omp.h>
#include <algorithm>
#include "../../common_types.h"
#include "../../data_structures/queue.h"
#include "../../data_structures/thread_local_buffer_pool.h"
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 64
#endif
namespace parallel_push_relabel
{
template <template <class> typename vector, typename T, typename U>
class max_flow_instance
{
struct alignas (CACHE_LINE_SIZE) vertex
{
U excess { 0 };
std::atomic<U> new_excess { 0 };
T label;
T new_label;
std::atomic_flag discovered = ATOMIC_FLAG_INIT;
};
vector<vector<cached_edge<T, U>>> & _residual_network;
std::unique_ptr<vertex[]> _vertices;
std::unique_ptr<T[]> _active { };
data_structures::thread_local_buffer_pool<T> _pool;
T _source, _sink, _relabel_threshold, _active_cnt;
std::size_t _relabel_progress;
const T _thread_count;
public:
max_flow_instance ( vector<vector<cached_edge<T, U>>> & graph, T source, T sink,
std::size_t thread_count = static_cast<size_t>(omp_get_max_threads ()) )
:
_residual_network ( graph ),
_vertices ( std::make_unique<vertex[]> ( _residual_network . size () ) ),
_active ( std::make_unique<T[]> ( _residual_network . size () ) ),
_pool ( data_structures::thread_local_buffer_pool<T> ( thread_count, _residual_network . size () ) ),
_source ( source ), _sink ( sink ), _active_cnt ( 0 ), _relabel_progress ( 0 ),
_thread_count ( thread_count )
{
omp_set_num_threads ( static_cast<int> ( _thread_count ) );
init ();
}
uint64_t _phase_cnt = 0;
uint64_t _push_cnt = 0;
uint64_t _global_update_cnt = 0;
U find_max_flow ( ) noexcept
{
find_max_flow_inner ();
#ifdef DEBUG
std::cout << "global updates:\t" << _global_update_cnt << std::endl;
std::cout << "phase cnt: " << _phase_cnt << std::endl;
std::cout << "pushes: " << _push_cnt << std::endl;
#endif
return _vertices[_sink] . new_excess + _vertices[_sink] . excess;
}
void preflow_to_flow ( )
{
std::swap ( _source, _sink );
find_max_flow_inner ();
std::swap ( _source, _sink );
#ifdef DEBUG
for ( std::size_t i = 0; i < _residual_network . size(); ++i )
if ( i != _source && i != _sink )
if ( _vertices[i] . excess > 0 )
std::cerr << "Excess violation: vertex " << i << ", excess " << _vertices[i] . excess << '\n';
#endif
}
auto steal_network ( )
{
return std::move ( _residual_network );
}
private:
static constexpr T ALPHA = 6, BETA = 12;
static constexpr double GLOBAL_RELABEL_FREQ = 0.5;
void init ( ) noexcept
{
#pragma omp parallel for schedule(static)
for ( std::size_t i = 0; i < _residual_network[_source] . size (); ++i )
{
auto & edge = _residual_network[_source][i];
_vertices[edge . dst_vertex] . excess = edge . r_capacity;
edge . reverse_r_capacity += edge . r_capacity;
_residual_network[edge . dst_vertex][edge . reverse_edge_index] . r_capacity += edge . r_capacity;
_residual_network[edge . dst_vertex][edge . reverse_edge_index] . reverse_r_capacity -= edge . r_capacity;
edge . r_capacity = 0;
}
T m = 0;
for ( std::size_t i = 0; i < _residual_network . size (); ++i )
m += _residual_network[i] . size ();
_relabel_threshold = _residual_network . size () * ALPHA + m / 2;
}
void find_max_flow_inner ( )
{
global_relabel ();
for ( ;; )
{
if ( _active_cnt == 0 )
return;
++_phase_cnt;
uint64_t push_cnt_per_phase = 0;
#pragma omp parallel
{
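                //stage 1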
#pragma omp for schedule(static) reduction(+:push_cnt_per_phase)
for ( T i = 0; i < _active_cnt; ++i )
{
auto thr_id = omp_get_thread_num ();
auto vertex = _active[i];
if ( _vertices[vertex] . label == _residual_network . size () )
continue;
push ( vertex, _vertices[vertex] . label, thr_id, push_cnt_per_phase );
}
//stage 2
#pragma omp for schedule(static) reduction(+:_relabel_progress)
for ( T i = 0; i < _active_cnt; ++i )
{
auto thr_id = omp_get_thread_num ();
auto vertex = _active[i];
relabel ( vertex, thr_id, _relabel_progress );
}
//stage 3
#pragma omp for schedule(static)
for ( T i = 0; i < _active_cnt; ++i )
{
auto vertex = _active[i];
_vertices[vertex] . label = _vertices[vertex] . new_label;
_vertices[vertex] . discovered . clear ( std::memory_order_relaxed );
}
//stage 4
#pragma omp single
_active_cnt = _pool . swap_data ( _active );
#pragma omp for schedule(static)
for ( T i = 0; i < _active_cnt; ++i )
{
auto vertex = _active[i];
_vertices[vertex] . excess += _vertices[vertex] . new_excess . load ( std::memory_order_relaxed );
_vertices[vertex] . new_excess . store ( 0, std::memory_order_relaxed );
_vertices[vertex] . discovered . clear ( std::memory_order_relaxed );
}
}
if ( _relabel_progress * GLOBAL_RELABEL_FREQ >= _relabel_threshold || push_cnt_per_phase == 0 )
{
_relabel_progress = 0;
global_relabel ();
}
_push_cnt += push_cnt_per_phase;
}
}
inline void push ( const T vertex, const T label, int thr_id, uint64_t & push_cnt ) noexcept
{
const auto target_label = label - 1;
for ( auto & edge : _residual_network[vertex] )
{
if ( edge . r_capacity > 0 && _vertices[edge . dst_vertex] . label == target_label )
{
auto flow = std::min ( _vertices[vertex] . excess, edge . r_capacity );
if ( edge . dst_vertex != _source && edge . dst_vertex != _sink )
if ( !_vertices[edge . dst_vertex] . discovered . test_and_set ( std::memory_order_relaxed ) )
_pool . push_back ( edge . dst_vertex, static_cast<size_t>(thr_id) );
++push_cnt;
_vertices[vertex] . excess -= flow;
_vertices[edge . dst_vertex] . new_excess . fetch_add ( flow, std::memory_order_relaxed );
edge . r_capacity -= flow;
edge . reverse_r_capacity += flow;
_residual_network[edge . dst_vertex][edge . reverse_edge_index] . reverse_r_capacity -= flow;
_residual_network[edge . dst_vertex][edge . reverse_edge_index] . r_capacity += flow;
if ( _vertices[vertex] . excess == 0 )
return;
}
}
}
inline void relabel ( const T vertex, const int thr_id, std::size_t & relabel_progress ) noexcept
{
if ( _vertices[vertex] . excess > 0 || _vertices[vertex] . label == _residual_network . size () )
{
relabel_progress += BETA;
_vertices[vertex] . new_label = calculate_new_label ( vertex );
relabel_progress += _residual_network[vertex] . size ();
if ( _vertices[vertex] . new_label == _residual_network . size () )
{
_vertices[vertex] . excess += _vertices[vertex] . new_excess;
_vertices[vertex] . new_excess = 0;
return;
}
if ( !_vertices[vertex] . discovered . test_and_set ( std::memory_order_relaxed ) )
_pool . push_back ( vertex, static_cast<size_t>(thr_id) );
} else
_vertices[vertex] . new_label = _vertices[vertex] . label;
}
inline T calculate_new_label ( const T vertex ) noexcept
{
T increase_to = _residual_network . size () - 1;
for ( auto & edge : _residual_network[vertex] )
{
if ( edge . r_capacity == 0 )
continue;
increase_to = std::min ( increase_to, _vertices[edge . dst_vertex] . label );
}
return increase_to + 1;
}
void global_relabel ( ) noexcept
{
++_global_update_cnt;
const auto not_reached = _residual_network . size ();
#pragma omp parallel for schedule(static)
for ( std::size_t i = 0; i < _residual_network . size (); ++i )
_vertices[i] . label = not_reached;
_vertices[_sink] . label = 0;
_vertices[_sink] . discovered . test_and_set ();
assert ( _pool . empty () );
_active[0] = _sink;
std::size_t current_queue_size = 1;
T current_distance = 0;
while ( current_queue_size > 0 )
{
#pragma omp parallel for schedule(static)
for ( std::size_t i = 0; i < current_queue_size; ++i )
{
auto thr_id = omp_get_thread_num ();
auto current_vertex = _active[i];
for ( auto edge : _residual_network[current_vertex] )
{
if ( edge . reverse_r_capacity > 0 )
{
if ( !_vertices[edge . dst_vertex] . discovered . test_and_set ( std::memory_order_relaxed ) )
{
_vertices[edge . dst_vertex] . label = current_distance + 1;
_pool . push_back ( edge . dst_vertex, static_cast<std::size_t>(thr_id) );
}
}
}
}
current_queue_size = _pool . swap_data ( _active );
++current_distance;
}
#pragma omp parallel for schedule(static)
for ( std::size_t i = 0; i < _residual_network . size (); ++i )
{
auto thr_id = omp_get_thread_num ();
if ( _vertices[i] . label != not_reached && _vertices[i] . excess > 0 && i != _sink )
_pool . push_back ( i, static_cast<size_t>(thr_id) );
_vertices[i] . discovered . clear ( std::memory_order_relaxed );
}
_active_cnt = _pool . swap_data ( _active );
}
};
}
#endif //MAXFLOW_PARALLEL_PUSH_RELABEL_H
|
grid_refinement.c | //
// Created by sachetto on 30/09/17.
//
#include "grid.h"
/**
 * Decides whether each cell should be refined by traversing the whole grid,
 * according to the parameters min_h and refinement_bound. A cell is not
 * refined if its face length is already at most min_h, or if the highest of
 * all fluxes coming into it from the six directions is less than
 * refinement_bound.
 *
 * @param min_h Minimum cell size (face length) that refinement may produce.
 * @param refinement_bound Minimum incoming flux required to refine a cell.
 */
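/* Worked example (hypothetical values): with refinement_bound = 0.5 and
 * min_h = 100.0, an active cell of face length 200.0 whose largest incoming
 * flux is 0.8 is split into 8 children (number_of_cells grows by 7); its
 * children then fail the face_length > min_h test and are left alone,
 * whatever their fluxes. */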
bool refine_grid_with_bound(struct grid* the_grid, double refinement_bound, double min_h) {
if( min_h <= 0.0 ) {
fprintf(stderr,"refine_grid(): Parameter min_h must be positive, passed %lf.", min_h);
return false;
}
struct cell_node *grid_cell, *auxiliar_grid_cell;
double maximum_flux;
bool continue_refining = true;
bool refined_once = false;
set_grid_flux(the_grid);
uint32_t *free_sv_pos = the_grid->free_sv_positions;
sb_clear(the_grid->refined_this_step);
while( continue_refining ) {
continue_refining = false;
grid_cell = the_grid->first_cell;
while( grid_cell != 0 ) {
maximum_flux = get_cell_maximum_flux(grid_cell);
if( ( grid_cell->can_change && grid_cell->active ) &&
( grid_cell->face_length > min_h ) &&
( maximum_flux >= refinement_bound ) )
{
auxiliar_grid_cell = grid_cell;
grid_cell = grid_cell->next;
refine_cell(auxiliar_grid_cell, free_sv_pos, &(the_grid->refined_this_step));
the_grid->number_of_cells += 7;
continue_refining = true;
refined_once = true;
}
else {
grid_cell = grid_cell->next;
}
}
}
return refined_once;
}
void refine_grid(struct grid* the_grid, int num_steps) {
if( the_grid == NULL ) {
fprintf(stderr,"refine_grid(): Parameter the_grid can't be null. Exiting!");
exit(10);
}
struct cell_node *grid_cell, *auxiliar_grid_cell;
for(int i = 0; i < num_steps; i++) {
grid_cell = the_grid->first_cell;
while (grid_cell != 0) {
if (grid_cell->can_change && grid_cell->active) {
auxiliar_grid_cell = grid_cell;
grid_cell = grid_cell->next;
refine_cell(auxiliar_grid_cell, NULL, NULL);
the_grid->number_of_cells += 7;
} else {
grid_cell = grid_cell->next;
}
}
}
}
void refine_grid_cell_at(struct grid *the_grid, uint32_t cell_number) {
    if( cell_number >= the_grid->number_of_cells ) {
fprintf(stderr, "refine_grid_cell_at: cell_number %u is out of bounds. Exiting!", cell_number);
exit(10);
}
struct cell_node *grid_cell = the_grid->first_cell;
for( uint32_t i = 0; i < cell_number; i++ ) {
grid_cell = grid_cell->next;
}
refine_cell( grid_cell, NULL ,NULL);
the_grid->number_of_cells += 7;
}
void set_grid_flux(struct grid *the_grid) {
uint32_t active_cells = the_grid->num_active_cells;
struct cell_node **ac = the_grid->active_cells;
int i;
#pragma omp parallel for
for (i = 0; i < active_cells; i++) {
ac[i]->north_flux = 0.0;
ac[i]->south_flux = 0.0;
ac[i]->east_flux = 0.0;
ac[i]->west_flux = 0.0;
ac[i]->front_flux = 0.0;
ac[i]->back_flux = 0.0;
}
#pragma omp parallel for
for (i = 0; i < active_cells; i++) {
set_cell_flux(ac[i], 's' ); // Computes south flux.
set_cell_flux(ac[i], 'n' ); // Computes north flux.
set_cell_flux(ac[i], 'e' ); // Computes east flux.
set_cell_flux(ac[i], 'w' ); // Computes west flux.
set_cell_flux(ac[i], 'f' ); // Computes front flux.
set_cell_flux(ac[i], 'b' ); // Computes back flux.
}
}
void refine_fibrotic_cells(struct grid *the_grid) {
assert(the_grid);
struct cell_node *grid_cell, *auxiliar_grid_cell;
grid_cell = the_grid->first_cell;
while ( grid_cell != 0 ) {
if(grid_cell->active && grid_cell->fibrotic) {
auxiliar_grid_cell = grid_cell;
grid_cell = grid_cell->next;
refine_cell( auxiliar_grid_cell, NULL, NULL);
the_grid->number_of_cells += 7;
}
else {
grid_cell = grid_cell->next;
}
}
}
void refine_border_zone_cells(struct grid *the_grid) {
assert(the_grid);
struct cell_node *grid_cell, *auxiliar_grid_cell;
grid_cell = the_grid->first_cell;
while ( grid_cell != 0 ) {
if(grid_cell->active && grid_cell->border_zone) {
auxiliar_grid_cell = grid_cell;
grid_cell = grid_cell->next;
refine_cell( auxiliar_grid_cell, NULL, NULL);
the_grid->number_of_cells += 7;
}
else {
grid_cell = grid_cell->next;
}
}
}
|
batch_shuffle_message_manager.h | /** Copyright 2020 Alibaba Group Holding Limited.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef GRAPE_PARALLEL_BATCH_SHUFFLE_MESSAGE_MANAGER_H_
#define GRAPE_PARALLEL_BATCH_SHUFFLE_MESSAGE_MANAGER_H_
#include <memory>
#include <thread>
#include <vector>
#include "grape/communication/sync_comm.h"
#include "grape/parallel/message_manager_base.h"
#include "grape/utils/vertex_array.h"
#include "grape/worker/comm_spec.h"
namespace grape {
/**
* @brief A kind of collective message manager.
*
* This message manager is designed for scenarios in which all mirror
* vertices' state needs to be overwritten with their masters' state, e.g.,
* PageRank.
*
* After a round, the message manager encodes the inner vertices' state of a
* vertex array for each other fragment.
*
* When a batch of messages is received, the message manager updates the
* state of outer vertices in a designated vertex array.
*/
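// A minimal usage sketch (hypothetical PageRank-style app; `comm_spec`,
// `frag` and the `rank` vertex array are assumptions, not part of this
// header):
//
//   BatchShuffleMessageManager mm;
//   mm.Init(comm_spec.comm());
//   // each round: ship inner-vertex state, then block until every mirror
//   // has been overwritten by its master copy
//   mm.SyncInnerVertices(frag, rank);
//   mm.UpdateOuterVertices();
//   mm.Finalize();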
namespace batch_shuffle_message_manager_impl {
template <typename FRAG_T>
struct IsRange {
using sub_vertices_t = typename FRAG_T::sub_vertices_t;
using vid_t = typename FRAG_T::vid_t;
static constexpr bool value =
std::is_same<sub_vertices_t, VertexRange<vid_t>>::value;
};
template <typename FRAG_T, typename MESSAGE_T>
struct ShuffleInplace {
static constexpr bool value =
IsRange<FRAG_T>::value && std::is_pod<MESSAGE_T>::value;
};
template <typename FRAG_T, typename MESSAGE_T>
struct PodShuffle {
static constexpr bool value =
!IsRange<FRAG_T>::value && std::is_pod<MESSAGE_T>::value;
};
template <typename MESSAGE_T>
struct ArchiveShuffle {
static constexpr bool value = !std::is_pod<MESSAGE_T>::value;
};
class PostProcessBase {
public:
PostProcessBase() {}
virtual ~PostProcessBase() {}
virtual void exec(fid_t fid) = 0;
};
template <typename GRAPH_T, typename DATA_T>
class PostProcess : public PostProcessBase {
using array_t = typename GRAPH_T::template vertex_array_t<DATA_T>;
public:
PostProcess(const GRAPH_T& frag, array_t& data,
std::vector<std::vector<char>>& buffers)
: frag_(frag), data_(data), buffers_(buffers) {}
void exec(fid_t fid) {
if (fid == frag_.fid()) {
return;
}
auto& vec = buffers_[fid];
OutArchive arc;
arc.SetSlice(vec.data(), vec.size());
auto& vertices = frag_.OuterVertices(fid);
for (auto v : vertices) {
arc >> data_[v];
}
buffers_[fid].clear();
}
private:
const GRAPH_T& frag_;
array_t& data_;
std::vector<std::vector<char>>& buffers_;
};
} // namespace batch_shuffle_message_manager_impl
class BatchShuffleMessageManager : public MessageManagerBase {
template <typename FRAG_T, typename MESSAGE_T>
using shuffle_inplace_t =
batch_shuffle_message_manager_impl::ShuffleInplace<FRAG_T, MESSAGE_T>;
template <typename FRAG_T, typename MESSAGE_T>
using pod_shuffle_t =
batch_shuffle_message_manager_impl::PodShuffle<FRAG_T, MESSAGE_T>;
template <typename FRAG_T>
using archive_shuffle_t =
batch_shuffle_message_manager_impl::ArchiveShuffle<FRAG_T>;
public:
BatchShuffleMessageManager() : comm_(NULL_COMM) {}
~BatchShuffleMessageManager() {
if (ValidComm(comm_)) {
MPI_Comm_free(&comm_);
}
}
/**
* @brief Inherit
*/
void Init(MPI_Comm comm) override {
MPI_Comm_dup(comm, &comm_);
comm_spec_.Init(comm_);
fid_ = comm_spec_.fid();
fnum_ = comm_spec_.fnum();
remaining_reqs_.resize(fnum_);
force_terminate_ = false;
terminate_info_.Init(fnum_);
shuffle_out_buffers_.resize(fnum_);
shuffle_in_buffers_.resize(fnum_);
recv_thread_ =
std::thread(&BatchShuffleMessageManager::recvThreadRoutine, this);
}
/**
* @brief Inherit
*/
void Start() override {}
/**
* @brief Inherit
*/
void StartARound() override {
msg_size_ = 0;
to_terminate_ = true;
}
/**
* @brief Inherit
*/
void FinishARound() override {}
/**
* @brief Inherit
*/
bool ToTerminate() override {
int flag = force_terminate_ ? 1 : 0;
int ret;
MPI_Allreduce(&flag, &ret, 1, MPI_INT, MPI_SUM, comm_);
if (ret > 0) {
terminate_info_.success = false;
sync_comm::AllGather(terminate_info_.info, comm_);
return true;
}
return to_terminate_;
}
/**
* @brief Inherit
*/
size_t GetMsgSize() const override { return msg_size_; }
/**
* @brief Inherit
*/
void Finalize() override {
if (!send_reqs_.empty()) {
MPI_Waitall(send_reqs_.size(), &send_reqs_[0], MPI_STATUSES_IGNORE);
send_reqs_.clear();
}
if (!recv_reqs_.empty()) {
MPI_Waitall(recv_reqs_.size(), &recv_reqs_[0], MPI_STATUSES_IGNORE);
recv_reqs_.clear();
}
{
size_t v = 1;
MPI_Send(&v, sizeof(size_t), MPI_CHAR, comm_spec_.FragToWorker(fid_), 1,
comm_);
recv_thread_.join();
}
MPI_Comm_free(&comm_);
comm_ = NULL_COMM;
}
/**
* @brief Synchronize the inner vertices' data of a vertex array to their
* mirrors.
*
* @tparam GRAPH_T
* @tparam DATA_T
* @param frag
* @param data
*/
template <typename GRAPH_T, typename DATA_T>
void SyncInnerVertices(
const GRAPH_T& frag,
typename GRAPH_T::template vertex_array_t<DATA_T>& data,
int thread_num = std::thread::hardware_concurrency()) {
to_terminate_ = false;
if (!send_reqs_.empty()) {
MPI_Waitall(send_reqs_.size(), &send_reqs_[0], MPI_STATUSES_IGNORE);
send_reqs_.clear();
}
if (!recv_reqs_.empty()) {
MPI_Waitall(recv_reqs_.size(), &recv_reqs_[0], MPI_STATUSES_IGNORE);
recv_reqs_.clear();
recv_from_.clear();
}
startRecv<GRAPH_T, DATA_T>(frag, data, thread_num);
remaining_frags_ = fnum_ - 1;
startSend<GRAPH_T, DATA_T>(frag, data, thread_num);
}
/**
* @brief This function will block until all outer vertices are updated, that
* is, messages from all other fragments are received.
*/
void UpdateOuterVertices() {
while (remaining_frags_ != 0) {
UpdatePartialOuterVertices();
}
}
/**
* @brief This function will block until a set of messages from one fragment
* are received.
*
* @return Source fragment id.
*/
fid_t UpdatePartialOuterVertices() {
int index;
fid_t ret;
while (true) {
MPI_Waitany(recv_reqs_.size(), &recv_reqs_[0], &index, MPI_STATUS_IGNORE);
ret = recv_from_[index];
--remaining_reqs_[ret];
if (remaining_reqs_[ret] == 0) {
--remaining_frags_;
if (remaining_frags_ == 0) {
recv_reqs_.clear();
recv_from_.clear();
}
break;
}
}
if (post_process_handle_ != nullptr) {
post_process_handle_->exec(ret);
}
return ret;
}
/**
* @brief Inherit
*/
void ForceContinue() override {}
/**
* @brief Inherit
*/
void ForceTerminate(const std::string& terminate_info) override {
force_terminate_ = true;
terminate_info_.info[comm_spec_.fid()] = terminate_info;
}
/**
* @brief Inherit
*/
const TerminateInfo& GetTerminateInfo() const override {
return terminate_info_;
}
private:
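// Shutdown protocol: this routine posts one tag-1 irecv per fragment and
// blocks until the self-addressed tag-1 message sent by Finalize() arrives,
// then cancels the still-pending requests. Finalize() uses that self-send
// purely to wake this thread up for a clean join.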
void recvThreadRoutine() {
std::vector<MPI_Request> recv_thread_reqs(fnum_);
std::vector<size_t> numbers(fnum_);
for (fid_t src_fid = 0; src_fid < fnum_; ++src_fid) {
MPI_Irecv(&numbers[src_fid], sizeof(size_t), MPI_CHAR,
comm_spec_.FragToWorker(src_fid), 1, comm_,
&recv_thread_reqs[src_fid]);
}
int index;
MPI_Waitany(fnum_, &recv_thread_reqs[0], &index, MPI_STATUS_IGNORE);
CHECK(index == static_cast<int>(fid_));
for (fid_t src_fid = 0; src_fid < fnum_; ++src_fid) {
if (src_fid != fid_) {
MPI_Cancel(&recv_thread_reqs[src_fid]);
}
}
}
template <typename GRAPH_T, typename DATA_T>
typename std::enable_if<archive_shuffle_t<DATA_T>::value>::type startRecv(
const GRAPH_T& frag,
typename GRAPH_T::template vertex_array_t<DATA_T>& data, int thread_num) {
std::vector<std::thread> threads(thread_num);
std::atomic<fid_t> cur_fid(0);
std::vector<size_t> out_archive_sizes(fnum_), in_archive_sizes(fnum_);
for (int i = 0; i < thread_num; ++i) {
threads[i] = std::thread([&]() {
while (true) {
fid_t got = cur_fid.fetch_add(1);
if (got >= fnum_) {
break;
}
auto& arc = shuffle_out_archives_[got];
arc.Clear();
auto& v_set = frag.MirrorVertices(got);
for (auto v : v_set) {
arc << data[v];
}
out_archive_sizes[comm_spec_.FragToWorker(got)] = arc.GetSize();
}
});
}
for (auto& thrd : threads) {
thrd.join();
}
sync_comm::AllToAll(out_archive_sizes, in_archive_sizes, comm_spec_.comm());
for (fid_t i = 1; i < fnum_; ++i) {
fid_t src_fid = (fid_ + fnum_ - i) % fnum_;
auto& buffer = shuffle_in_buffers_[src_fid];
buffer.resize(in_archive_sizes[src_fid]);
int old_req_num = recv_reqs_.size();
sync_comm::irecv_buffer<char>(buffer.data(), buffer.size(),
comm_spec_.FragToWorker(src_fid), 0, comm_,
recv_reqs_);
int new_req_num = recv_reqs_.size();
recv_from_.resize(new_req_num, src_fid);
remaining_reqs_[src_fid] = new_req_num - old_req_num;
}
post_process_handle_ = std::make_shared<
batch_shuffle_message_manager_impl::PostProcess<GRAPH_T, DATA_T>>(
frag, data, shuffle_in_buffers_);
}
template <typename GRAPH_T, typename DATA_T>
typename std::enable_if<shuffle_inplace_t<GRAPH_T, DATA_T>::value>::type
startRecv(const GRAPH_T& frag,
typename GRAPH_T::template vertex_array_t<DATA_T>& data,
int thread_num) {
for (fid_t i = 1; i < fnum_; ++i) {
fid_t src_fid = (fid_ + fnum_ - i) % fnum_;
auto range = frag.OuterVertices(src_fid);
int old_req_num = recv_reqs_.size();
sync_comm::irecv_buffer<char>(
reinterpret_cast<char*>(&data[*range.begin()]),
range.size() * sizeof(DATA_T), comm_spec_.FragToWorker(src_fid), 0,
comm_, recv_reqs_);
int new_req_num = recv_reqs_.size();
recv_from_.resize(new_req_num, src_fid);
remaining_reqs_[src_fid] = new_req_num - old_req_num;
}
}
template <typename GRAPH_T, typename DATA_T>
typename std::enable_if<pod_shuffle_t<GRAPH_T, DATA_T>::value>::type
startRecv(const GRAPH_T& frag,
typename GRAPH_T::template vertex_array_t<DATA_T>& data,
int thread_num) {
for (fid_t i = 1; i < fnum_; ++i) {
fid_t src_fid = (fid_ + fnum_ - i) % fnum_;
auto& buffer = shuffle_in_buffers_[src_fid];
buffer.resize(frag.OuterVertices(src_fid).size() * sizeof(DATA_T));
int old_req_num = recv_reqs_.size();
sync_comm::irecv_buffer<char>(buffer.data(), buffer.size(),
comm_spec_.FragToWorker(src_fid), 0, comm_,
recv_reqs_);
int new_req_num = recv_reqs_.size();
recv_from_.resize(new_req_num, src_fid);
remaining_reqs_[src_fid] = new_req_num - old_req_num;
}
post_process_handle_ = std::make_shared<
batch_shuffle_message_manager_impl::PostProcess<GRAPH_T, DATA_T>>(
frag, data, shuffle_in_buffers_);
}
template <typename GRAPH_T, typename DATA_T>
typename std::enable_if<!archive_shuffle_t<DATA_T>::value>::type startSend(
const GRAPH_T& frag,
const typename GRAPH_T::template vertex_array_t<DATA_T>& data,
int thread_num) {
for (fid_t i = 1; i < fnum_; ++i) {
fid_t dst_fid = (i + fid_) % fnum_;
auto& id_vec = frag.MirrorVertices(dst_fid);
auto& vec = shuffle_out_buffers_[dst_fid];
vec.clear();
vec.resize(id_vec.size() * sizeof(DATA_T));
DATA_T* buf = reinterpret_cast<DATA_T*>(vec.data());
size_t num = id_vec.size();
#pragma omp parallel for num_threads(thread_num)
for (size_t k = 0; k < num; ++k) {
buf[k] = data[id_vec[k]];
}
sync_comm::isend_buffer<char>(vec.data(), vec.size(),
comm_spec_.FragToWorker(dst_fid), 0, comm_,
send_reqs_);
msg_size_ += vec.size();
}
}
template <typename GRAPH_T, typename DATA_T>
typename std::enable_if<archive_shuffle_t<DATA_T>::value>::type startSend(
const GRAPH_T& frag,
const typename GRAPH_T::template vertex_array_t<DATA_T>& data,
int thread_num) {
for (fid_t i = 1; i < fnum_; ++i) {
fid_t dst_fid = (i + fid_) % fnum_;
auto& arc = shuffle_out_archives_[dst_fid];
sync_comm::isend_buffer<char>(arc.GetBuffer(), arc.GetSize(),
comm_spec_.FragToWorker(dst_fid), 0, comm_,
send_reqs_);
msg_size_ += arc.GetSize();
}
}
template <typename GRAPH_T, typename DATA_T>
typename std::enable_if<!shuffle_inplace_t<GRAPH_T, DATA_T>::value>::type
postProcess(const GRAPH_T& frag, fid_t i,
const typename GRAPH_T::template vertex_array_t<DATA_T>& data,
int thread_num) {
if (i == fid_) {
return;
}
auto& vec = shuffle_in_buffers_[i];
OutArchive arc;
arc.SetSlice(vec.data(), vec.size());
auto& vertices = frag.OuterVertices(i);
for (auto v : vertices) {
arc >> data[v];
}
}
template <typename GRAPH_T, typename DATA_T>
typename std::enable_if<shuffle_inplace_t<GRAPH_T, DATA_T>::value>::type
postProcess(const GRAPH_T& frag, fid_t i,
const typename GRAPH_T::template vertex_array_t<DATA_T>& data,
int thread_num) {}
fid_t fid_;
fid_t fnum_;
CommSpec comm_spec_;
MPI_Comm comm_;
std::vector<std::vector<char>> shuffle_out_buffers_;
std::vector<InArchive> shuffle_out_archives_;
std::vector<std::vector<char>> shuffle_in_buffers_;
std::shared_ptr<batch_shuffle_message_manager_impl::PostProcessBase>
post_process_handle_;
std::vector<MPI_Request> recv_reqs_;
std::vector<fid_t> recv_from_;
std::vector<int> remaining_reqs_;
fid_t remaining_frags_;
std::vector<MPI_Request> send_reqs_;
size_t msg_size_;
std::thread recv_thread_;
bool to_terminate_;
bool force_terminate_;
TerminateInfo terminate_info_;
};
} // namespace grape
#endif // GRAPE_PARALLEL_BATCH_SHUFFLE_MESSAGE_MANAGER_H_
|
sdna_spatial_accumulators.h | //sDNA software for spatial network analysis
//Copyright (C) 2011-2019 Cardiff University
//This program is free software: you can redistribute it and/or modify
//it under the terms of the GNU General Public License as published by
//the Free Software Foundation, either version 3 of the License, or
//(at your option) any later version.
//This program is distributed in the hope that it will be useful,
//but WITHOUT ANY WARRANTY; without even the implied warranty of
//MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
//GNU General Public License for more details.
//You should have received a copy of the GNU General Public License
//along with this program. If not, see <https://www.gnu.org/licenses/>.
#include "sdna_arrays.h"
#include "edge.h"
#pragma once
template <typename T> class ThreadSafeAccumulator : public SDNAPolylineIdRadiusIndexed2dArrayBase
{
SDNAPolylineIdRadiusIndexed2dArray<T> data;
public:
virtual void enable() {data.enable();}
virtual bool is_enabled() {return data.is_enabled();}
void initialize(size_t num_links, size_t num_radii)
{
data.initialize(num_links,num_radii,0.);
}
void increment(const SDNAPolyline * const link,size_t radius,double x)
{
T& d=data(link,radius);
#pragma omp atomic
d += x;
}
virtual float floatvalue(SDNAPolylineId link,size_t radius)
{
return (float)data(link,radius);
}
};
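// Concurrency sketch (hypothetical caller; `links` and `flow` are assumptions):
// many OpenMP threads may increment the same (link, radius) cell at once, and
// the `omp atomic` above keeps each per-cell sum exact without a global lock:
//
//   ThreadSafeAccumulator<double> acc;
//   acc.enable();
//   acc.initialize(num_links, num_radii);
//   #pragma omp parallel for
//   for (long i = 0; i < (long)num_links; ++i)
//       acc.increment(links[i], 0, flow[i]);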
template <typename T> class BidirectionalThreadSafeAccumulator
{
ThreadSafeAccumulator<T> fwd_data, bwd_data;
bool use_both_directions, is_initialized;
public:
BidirectionalThreadSafeAccumulator() : use_both_directions(false), is_initialized(false) {}
void set_bidirectional() {assert(!is_initialized); use_both_directions=true;}
void enable()
{
fwd_data.enable();
if (use_both_directions)
bwd_data.enable();
}
bool is_enabled() {assert (fwd_data.is_enabled()==bwd_data.is_enabled()); return fwd_data.is_enabled();}
void initialize(size_t num_links, size_t num_radii)
{
is_initialized=true;
fwd_data.initialize(num_links,num_radii);
if (use_both_directions)
bwd_data.initialize(num_links,num_radii);
}
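// In unidirectional mode there is only one array, so a quantity that applies
// to both directions is added once with weight 2; in bidirectional mode it is
// added to each direction's array separately.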
void increment_both(const SDNAPolyline * const link,size_t radius,double x)
{
if (use_both_directions)
{
fwd_data.increment(link,radius,x);
bwd_data.increment(link,radius,x);
}
else
fwd_data.increment(link,radius,x*2);
}
void increment(const Edge * const edge,size_t radius,double x)
{
assert(edge->direction==PLUS || edge->direction==MINUS);
if (!use_both_directions || edge->direction==PLUS)
fwd_data.increment(edge->link,radius,x);
else
bwd_data.increment(edge->link,radius,x);
}
SDNAPolylineIdRadiusIndexed2dArrayBase* get_output_facade(polarity direction)
{
assert(use_both_directions);
assert(direction==PLUS || direction==MINUS);
if (direction==PLUS)
return &fwd_data;
else
return &bwd_data;
}
SDNAPolylineIdRadiusIndexed2dArrayBase* get_output_facade_bidirectional()
{
assert(!use_both_directions);
return &fwd_data;
}
bool is_bidirectional()
{
return use_both_directions;
}
};
class AccessibilityAccumulator : public SDNAPolylineIdRadiusIndexed2dArrayBase
{
SDNAPolylineIdRadiusIndexed2dArray<double> accessibility;
float distance_constant_factor;
double nqpdn, nqpdd;
public:
virtual void enable() {accessibility.enable();}
virtual bool is_enabled() {return accessibility.is_enabled();}
void initialize(size_t num_links, size_t num_radii,float dcf,double nqpdn_in,double nqpdd_in)
{
distance_constant_factor = dcf;
accessibility.initialize(num_links,num_radii,0.);
nqpdn = nqpdn_in;
nqpdd = nqpdd_in;
}
void increment(SDNAPolyline *link,size_t radius,double quantity,double cost)
{
//to prevent zero angular costs
cost += distance_constant_factor;
//just in case cost was 0, we wouldn't want to make infinite accessibility for a 0-sized piece of network
//(though if quantity is nonzero then an infinite accessibility is fair)
if (quantity==0)
return;
accessibility(link,radius) += pow(quantity,nqpdn)/pow(cost,nqpdd);
}
virtual float floatvalue(SDNAPolylineId link,size_t radius)
{
return (float)accessibility(link,radius);
}
};
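// In effect each call above adds a gravity-style term,
//   accessibility(link, r) += quantity^nqpdn / (cost + distance_constant_factor)^nqpdd,
// so nqpdn tunes how strongly the destination quantity attracts and nqpdd how
// quickly that attraction decays with network cost.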
class ThreadSafeProgressBar
{
private:
size_t num_links_computed, last_progress_bar_pos, increment, total_num_links;
int (__cdecl *set_progressor_callback)(float);
public:
ThreadSafeProgressBar(size_t total_num_links,int (__cdecl *set_progressor_callback)(float))
: num_links_computed(0),
last_progress_bar_pos((size_t) -1), // stored unsigned, so this wraps to SIZE_MAX
increment(total_num_links/1000),
total_num_links(total_num_links),
set_progressor_callback(set_progressor_callback)
{
if (increment==0)
increment = total_num_links;
}
void increment_bar(long skip_fraction)
{
#pragma omp atomic
num_links_computed+=skip_fraction;
//if we have computed another [increment] links since last update
//then signal new progress bar position via callback
// -- master thread only due to com marshalling
if (OMP_THREAD==0)
{
size_t current_progress_bar_pos = num_links_computed / increment;
if (last_progress_bar_pos < current_progress_bar_pos)
set_progressor_callback((float)num_links_computed/(float)total_num_links*100.f);
last_progress_bar_pos = current_progress_bar_pos;
}
}
};
template <class T> class ThreadSafeVector
{
private:
vector<T> v;
boost::mutex mutex;
public:
//push_back and reserve lock with mutex
void push_back(T& item)
{
boost::lock_guard<boost::mutex> lock(mutex);
v.push_back(item);
}
void reserve(size_t n)
{
boost::lock_guard<boost::mutex> lock(mutex);
v.reserve(n);
}
//forward other methods (not thread safe) just to keep track of what's used
size_t size()
{
return v.size();
}
typename vector<T>::iterator begin() {return v.begin();}
typename vector<T>::iterator end() {return v.end();}
//copy constructor creates new mutex; the payload v is copied, not the lock
ThreadSafeVector(ThreadSafeVector<T> &other) : v(other.v) {}
//default constructor needed because copy constructor suppresses it
ThreadSafeVector() {}
//assignment operator keeps same mutex
ThreadSafeVector<T>& operator=(ThreadSafeVector<T> &other)
{
boost::lock_guard<boost::mutex> lock(mutex);
v = other.v;
return *this;
}
};
|
convolutiondepthwise_3x3_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
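// Overview of the kernels below: int8 pixels and weights are widened to s16
// (sshll / vmovl), multiply-accumulated into s32 (smull / smlal), and either
// stored as raw s32 sums (convdw3x3s1/s2_int8_neon) or requantized back to s8
// in the *_requant_neon variants, in effect
//   out_s8 = saturate(round((sum_s32 * scale_in + bias) * scale_out))
// mirroring the scalar float2int8(...) fallback in each remainder loop. The
// stride-1 kernel emits two output rows per pass (outptr0 / outptr0n) so that
// input rows r1 and r2 are loaded once but feed both outputs.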
static void convdw3x3s1_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const signed char* kernel = (const signed char*)_kernel + p * 9;
int* outptr0 = out;
int* outptr0n = outptr0 + outw;
const signed char* img0 = bottom_blob.channel(p);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
const signed char* r3 = img0 + w * 3;
int i = 0;
#if __ARM_NEON
int8x16_t _k0123456789x = vld1q_s8(kernel);
int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x));
int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x));
int16x4_t _k0123 = vget_low_s16(_k_s16);
int16x4_t _k4567 = vget_high_s16(_k_s16);
int16x4_t _k8xxx = vget_low_s16(_kn_s16);
#endif // __ARM_NEON
for (; i + 1 < outh; i += 2)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"ld1 {v4.8b, v5.8b}, [%3] \n"
"ld1 {v6.8b, v7.8b}, [%4] \n"
"ld1 {v8.8b, v9.8b}, [%5] \n"
"ld1 {v10.8b, v11.8b}, [%6] \n"
"add %3, %3, #8 \n"
"add %4, %4, #8 \n"
"add %5, %5, #8 \n"
"add %6, %6, #8 \n"
"ext v12.8b, v4.8b, v5.8b, #1 \n"
"ext v13.8b, v4.8b, v5.8b, #2 \n"
"ext v14.8b, v6.8b, v7.8b, #1 \n"
"ext v15.8b, v6.8b, v7.8b, #2 \n"
"ext v16.8b, v8.8b, v9.8b, #1 \n"
"ext v17.8b, v8.8b, v9.8b, #2 \n"
"ext v18.8b, v10.8b, v11.8b, #1 \n"
"ext v19.8b, v10.8b, v11.8b, #2 \n"
"sshll v4.8h, v4.8b, #0 \n" // r00
"sshll v12.8h, v12.8b, #0 \n" // r01
"sshll v13.8h, v13.8b, #0 \n" // r02
"sshll v6.8h, v6.8b, #0 \n" // r10
"sshll v14.8h, v14.8b, #0 \n" // r11
"sshll v15.8h, v15.8b, #0 \n" // r12
"sshll v8.8h, v8.8b, #0 \n" // r20
"sshll v16.8h, v16.8b, #0 \n" // r21
"sshll v17.8h, v17.8b, #0 \n" // r22
"sshll v10.8h, v10.8b, #0 \n" // r30
"sshll v18.8h, v18.8b, #0 \n" // r31
"sshll v19.8h, v19.8b, #0 \n" // r32
// r0
"smull v20.4s, v4.4h, %14.h[0] \n" // (r00 - r07) * k00
"smull2 v21.4s, v4.8h, %14.h[0] \n"
"smull v22.4s, v12.4h, %14.h[1] \n" // (r01 - r08) * k01
"smull2 v23.4s, v12.8h, %14.h[1] \n"
"smull v24.4s, v13.4h, %14.h[2] \n" // (r02 - r09) * k02
"smull2 v25.4s, v13.8h, %14.h[2] \n"
// r1
"smull v26.4s, v6.4h, %14.h[0] \n" // (r10 - r17) * k00
"smull2 v27.4s, v6.8h, %14.h[0] \n"
"smull v28.4s, v14.4h, %14.h[1] \n" // (r11 - r18) * k01
"smull2 v29.4s, v14.8h, %14.h[1] \n"
"smull v30.4s, v15.4h, %14.h[2] \n" // (r12 - r19) * k02
"smull2 v31.4s, v15.8h, %14.h[2] \n"
"smlal v20.4s, v6.4h, %14.h[3] \n" // (r10 - r17) * k03
"smlal2 v21.4s, v6.8h, %14.h[3] \n"
"smlal v22.4s, v14.4h, %15.h[0] \n" // (r11 - r18) * k04
"smlal2 v23.4s, v14.8h, %15.h[0] \n"
"smlal v24.4s, v15.4h, %15.h[1] \n" // (r12 - r19) * k05
"smlal2 v25.4s, v15.8h, %15.h[1] \n"
// r2
"smlal v26.4s, v8.4h, %14.h[3] \n" // (r20 - r27) * k03
"smlal2 v27.4s, v8.8h, %14.h[3] \n"
"smlal v28.4s, v16.4h, %15.h[0] \n" // (r21 - r28) * k04
"smlal2 v29.4s, v16.8h, %15.h[0] \n"
"smlal v30.4s, v17.4h, %15.h[1] \n" // (r22 - r29) * k05
"smlal2 v31.4s, v17.8h, %15.h[1] \n"
"smlal v20.4s, v8.4h, %15.h[2] \n" // (r20 - r27) * k06
"smlal2 v21.4s, v8.8h, %15.h[2] \n"
"smlal v22.4s, v16.4h, %15.h[3] \n" // (r21 - r28) * k07
"smlal2 v23.4s, v16.8h, %15.h[3] \n"
"smlal v24.4s, v17.4h, %16.h[0] \n" // (r22 - r29) * k08
"smlal2 v25.4s, v17.8h, %16.h[0] \n"
// r3
"smlal v26.4s, v10.4h, %15.h[2] \n" // (r30 - r37) * k06
"smlal2 v27.4s, v10.8h, %15.h[2] \n"
"smlal v28.4s, v18.4h, %15.h[3] \n" // (r31 - r38) * k07
"smlal2 v29.4s, v18.8h, %15.h[3] \n"
"smlal v30.4s, v19.4h, %16.h[0] \n" // (r32 - r39) * k08
"smlal2 v31.4s, v19.8h, %16.h[0] \n"
// add and save
"add v20.4s, v20.4s, v22.4s \n"
"add v21.4s, v21.4s, v23.4s \n"
"add v26.4s, v26.4s, v28.4s \n"
"add v27.4s, v27.4s, v29.4s \n"
"add v20.4s, v20.4s, v24.4s \n"
"add v21.4s, v21.4s, v25.4s \n"
"add v26.4s, v26.4s, v30.4s \n"
"add v27.4s, v27.4s, v31.4s \n"
"st1 {v20.4s, v21.4s}, [%1], #32 \n"
"st1 {v26.4s, v27.4s}, [%2], #32 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr0n), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr0n),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k8xxx) // %16
: "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
// r0
"vld1.s8 {d30-d31}, [%3] \n" // r0
"add %3, %3, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n" // r00
"vmovl.s8 q5, d10 \n" // r01
"vmovl.s8 q6, d12 \n" // r02
// sum0
"vmull.s16 q7, d30, %P14[0] \n" // (r00 - r07) * k00
"vmull.s16 q8, d31, %P14[0] \n"
"vmull.s16 q9, d10, %P14[1] \n" // (r01 - r08) * k01
"vmull.s16 q10, d11, %P14[1] \n"
"vmlal.s16 q7, d12, %P14[2] \n" // (r02 - r09) * k02
"vmlal.s16 q8, d13, %P14[2] \n"
// r1
"vld1.s8 {d30-d31}, [%4] \n" // r1
"add %4, %4, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n" // r10
"vmovl.s8 q5, d10 \n" // r11
"vmovl.s8 q6, d12 \n" // r12
// sum0
"vmlal.s16 q7, d30, %P14[3] \n" // (r10 - r17) * k03
"vmlal.s16 q8, d31, %P14[3] \n"
"vmlal.s16 q9, d10, %P15[0] \n" // (r11 - r18) * k04
"vmlal.s16 q10, d11, %P15[0] \n"
"vmlal.s16 q7, d12, %P15[1] \n" // (r12 - r19) * k05
"vmlal.s16 q8, d13, %P15[1] \n"
// sum1
"vmull.s16 q11, d30, %P14[0] \n" // (r10 - r17) * k00
"vmull.s16 q12, d31, %P14[0] \n"
"vmull.s16 q13, d10, %P14[1] \n" // (r11 - r18) * k01
"vmull.s16 q14, d11, %P14[1] \n"
"vmlal.s16 q11, d12, %P14[2] \n" // (r12 - r19) * k02
"vmlal.s16 q12, d13, %P14[2] \n"
// r2
"vld1.s8 {d30-d31}, [%5] \n" // r2
"add %5, %5, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n" // r20
"vmovl.s8 q5, d10 \n" // r21
"vmovl.s8 q6, d12 \n" // r22
// sum0
"vmlal.s16 q7, d30, %P15[2] \n" // (r20 - r27) * k06
"vmlal.s16 q8, d31, %P15[2] \n"
"vmlal.s16 q9, d10, %P15[3] \n" // (r21 - r28) * k07
"vmlal.s16 q10, d11, %P15[3] \n"
"vmlal.s16 q7, d12, %P16[0] \n" // (r22 - r29) * k08
"vmlal.s16 q8, d13, %P16[0] \n"
// sum1
"vmlal.s16 q11, d30, %P14[3] \n" // (r20 - r27) * k03
"vmlal.s16 q12, d31, %P14[3] \n"
"vmlal.s16 q13, d10, %P15[0] \n" // (r21 - r28) * k04
"vmlal.s16 q14, d11, %P15[0] \n"
"vmlal.s16 q11, d12, %P15[1] \n" // (r22 - r29) * k05
"vmlal.s16 q12, d13, %P15[1] \n"
// r3
"vld1.s8 {d30-d31}, [%6] \n" // r3
"add %6, %6, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n" // r30
"vmovl.s8 q5, d10 \n" // r31
"vmovl.s8 q6, d12 \n" // r32
// sum1
"vmlal.s16 q11, d30, %P15[2] \n" // (r30 - r37) * k06
"vmlal.s16 q12, d31, %P15[2] \n"
"vmlal.s16 q13, d10, %P15[3] \n" // (r31 - r38) * k07
"vmlal.s16 q14, d11, %P15[3] \n"
"vmlal.s16 q11, d12, %P16[0] \n" // (r32 - r39) * k08
"vmlal.s16 q12, d13, %P16[0] \n"
"subs %0, %0, #1 \n"
// add and save
"vadd.s32 q7, q7, q9 \n"
"vadd.s32 q8, q8, q10 \n"
"vadd.s32 q11, q11, q13 \n"
"vadd.s32 q12, q12, q14 \n"
"vst1.s32 {d14-d17}, [%1]! \n"
"vst1.s32 {d22-d25}, [%2]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr0n), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr0n),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k8xxx) // %16
: "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
// TODO NEON
int sum0 = 0;
int sum0n = 0;
sum0 += (int)r0[0] * kernel[0];
sum0 += (int)r0[1] * kernel[1];
sum0 += (int)r0[2] * kernel[2];
sum0 += (int)r1[0] * kernel[3];
sum0 += (int)r1[1] * kernel[4];
sum0 += (int)r1[2] * kernel[5];
sum0 += (int)r2[0] * kernel[6];
sum0 += (int)r2[1] * kernel[7];
sum0 += (int)r2[2] * kernel[8];
sum0n += (int)r1[0] * kernel[0];
sum0n += (int)r1[1] * kernel[1];
sum0n += (int)r1[2] * kernel[2];
sum0n += (int)r2[0] * kernel[3];
sum0n += (int)r2[1] * kernel[4];
sum0n += (int)r2[2] * kernel[5];
sum0n += (int)r3[0] * kernel[6];
sum0n += (int)r3[1] * kernel[7];
sum0n += (int)r3[2] * kernel[8];
*outptr0 = sum0;
*outptr0n = sum0n;
r0++;
r1++;
r2++;
r3++;
outptr0++;
outptr0n++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr0 += outw;
outptr0n += outw;
}
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"ld1 {v4.8b, v5.8b}, [%2] \n"
"ld1 {v6.8b, v7.8b}, [%3] \n"
"ld1 {v8.8b, v9.8b}, [%4] \n"
"add %2, %2, #8 \n"
"add %3, %3, #8 \n"
"add %4, %4, #8 \n"
"ext v12.8b, v4.8b, v5.8b, #1 \n"
"ext v13.8b, v4.8b, v5.8b, #2 \n"
"ext v14.8b, v6.8b, v7.8b, #1 \n"
"ext v15.8b, v6.8b, v7.8b, #2 \n"
"ext v16.8b, v8.8b, v9.8b, #1 \n"
"ext v17.8b, v8.8b, v9.8b, #2 \n"
"sshll v4.8h, v4.8b, #0 \n" // r00
"sshll v12.8h, v12.8b, #0 \n" // r01
"sshll v13.8h, v13.8b, #0 \n" // r02
"sshll v6.8h, v6.8b, #0 \n" // r10
"sshll v14.8h, v14.8b, #0 \n" // r11
"sshll v15.8h, v15.8b, #0 \n" // r12
"sshll v8.8h, v8.8b, #0 \n" // r20
"sshll v16.8h, v16.8b, #0 \n" // r21
"sshll v17.8h, v17.8b, #0 \n" // r22
// r0
"smull v20.4s, v4.4h, %10.h[0] \n" // (r00 - r07) * k00
"smull2 v21.4s, v4.8h, %10.h[0] \n"
"smull v22.4s, v12.4h, %10.h[1] \n" // (r01 - r08) * k01
"smull2 v23.4s, v12.8h, %10.h[1] \n"
"smull v24.4s, v13.4h, %10.h[2] \n" // (r02 - r09) * k02
"smull2 v25.4s, v13.8h, %10.h[2] \n"
// r1
"smlal v20.4s, v6.4h, %10.h[3] \n" // (r10 - r17) * k03
"smlal2 v21.4s, v6.8h, %10.h[3] \n"
"smlal v22.4s, v14.4h, %11.h[0] \n" // (r11 - r18) * k04
"smlal2 v23.4s, v14.8h, %11.h[0] \n"
"smlal v24.4s, v15.4h, %11.h[1] \n" // (r12 - r19) * k05
"smlal2 v25.4s, v15.8h, %11.h[1] \n"
// r2
"smlal v20.4s, v8.4h, %11.h[2] \n" // (r20 - r27) * k06
"smlal2 v21.4s, v8.8h, %11.h[2] \n"
"smlal v22.4s, v16.4h, %11.h[3] \n" // (r21 - r28) * k07
"smlal2 v23.4s, v16.8h, %11.h[3] \n"
"smlal v24.4s, v17.4h, %12.h[0] \n" // (r22 - r29) * k08
"smlal2 v25.4s, v17.8h, %12.h[0] \n"
// add and save
"add v20.4s, v20.4s, v22.4s \n"
"add v21.4s, v21.4s, v23.4s \n"
"add v20.4s, v20.4s, v24.4s \n"
"add v21.4s, v21.4s, v25.4s \n"
"st1 {v20.4s, v21.4s}, [%1], #32 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k4567), // %11
"w"(_k8xxx) // %12
: "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25");
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
// r0
"vld1.s8 {d30-d31}, [%2] \n" // r0
"add %2, %2, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n" // r00
"vmovl.s8 q5, d10 \n" // r01
"vmovl.s8 q6, d12 \n" // r02
// sum0
"vmull.s16 q7, d30, %P10[0] \n" // (r00 - r07) * k00
"vmull.s16 q8, d31, %P10[0] \n"
"vmull.s16 q9, d10, %P10[1] \n" // (r01 - r08) * k01
"vmull.s16 q10, d11, %P10[1] \n"
"vmlal.s16 q7, d12, %P10[2] \n" // (r02 - r09) * k02
"vmlal.s16 q8, d13, %P10[2] \n"
// r1
"vld1.s8 {d30-d31}, [%3] \n" // r1
"add %3, %3, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n" // r10
"vmovl.s8 q5, d10 \n" // r11
"vmovl.s8 q6, d12 \n" // r12
// sum0
"vmlal.s16 q7, d30, %P10[3] \n" // (r10 - r17) * k03
"vmlal.s16 q8, d31, %P10[3] \n"
"vmlal.s16 q9, d10, %P11[0] \n" // (r11 - r18) * k04
"vmlal.s16 q10, d11, %P11[0] \n"
"vmlal.s16 q7, d12, %P11[1] \n" // (r12 - r19) * k05
"vmlal.s16 q8, d13, %P11[1] \n"
// r2
"vld1.s8 {d30-d31}, [%4] \n" // r2
"add %4, %4, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n" // r20
"vmovl.s8 q5, d10 \n" // r21
"vmovl.s8 q6, d12 \n" // r22
// sum0
"vmlal.s16 q7, d30, %P11[2] \n" // (r20 - r27) * k06
"vmlal.s16 q8, d31, %P11[2] \n"
"vmlal.s16 q9, d10, %P11[3] \n" // (r21 - r28) * k07
"vmlal.s16 q10, d11, %P11[3] \n"
"vmlal.s16 q7, d12, %P12[0] \n" // (r22 - r29) * k08
"vmlal.s16 q8, d13, %P12[0] \n"
"subs %0, %0, #1 \n"
// add and save
"vadd.s32 q7, q7, q9 \n"
"vadd.s32 q8, q8, q10 \n"
"vst1.s32 {d14-d17}, [%1]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k4567), // %11
"w"(_k8xxx) // %12
: "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
int sum = 0;
sum += (int)r0[0] * kernel[0];
sum += (int)r0[1] * kernel[1];
sum += (int)r0[2] * kernel[2];
sum += (int)r1[0] * kernel[3];
sum += (int)r1[1] * kernel[4];
sum += (int)r1[2] * kernel[5];
sum += (int)r2[0] * kernel[6];
sum += (int)r2[1] * kernel[7];
sum += (int)r2[2] * kernel[8];
*outptr0 = sum;
r0++;
r1++;
r2++;
outptr0++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
static void convdw3x3s2_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
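// after a stride-2 row the input pointers have advanced 2*outw elements;
// tailstep carries them past the rest of that row and over the following
// row, implementing the vertical stride of 2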
const int tailstep = w - 2 * outw + w;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const signed char* kernel = (const signed char*)_kernel + p * 9;
int* outptr = out;
const signed char* img = bottom_blob.channel(p);
const signed char* r0 = img;
const signed char* r1 = img + w;
const signed char* r2 = img + w * 2;
int i = 0;
#if __ARM_NEON
int8x16_t _k0123456789x = vld1q_s8(kernel);
int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x));
int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x));
int16x4_t _k0123 = vget_low_s16(_k_s16);
int16x4_t _k4567 = vget_high_s16(_k_s16);
int16x4_t _k8xxx = vget_low_s16(_kn_s16);
#endif // __ARM_NEON
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"ld2 {v4.8b, v5.8b}, [%2], #16 \n"
"ld2 {v6.8b, v7.8b}, [%2] \n"
"ld2 {v8.8b, v9.8b}, [%3], #16 \n"
"ld2 {v10.8b, v11.8b}, [%3] \n"
"ld2 {v12.8b, v13.8b}, [%4], #16 \n"
"ld2 {v14.8b, v15.8b}, [%4] \n"
"ext v6.8b, v4.8b, v6.8b, #1 \n"
"ext v10.8b, v8.8b, v10.8b, #1 \n"
"ext v14.8b, v12.8b, v14.8b, #1 \n"
"sshll v4.8h, v4.8b, #0 \n" // r00
"sshll v5.8h, v5.8b, #0 \n" // r01
"sshll v6.8h, v6.8b, #0 \n" // r02
"sshll v8.8h, v8.8b, #0 \n" // r10
"sshll v9.8h, v9.8b, #0 \n" // r11
"sshll v10.8h, v10.8b, #0 \n" // r12
"sshll v12.8h, v12.8b, #0 \n" // r20
"sshll v13.8h, v13.8b, #0 \n" // r21
"sshll v14.8h, v14.8b, #0 \n" // r22
// r0
"smull v20.4s, v4.4h, %10.h[0] \n" // (r00 - r07) * k00
"smull2 v21.4s, v4.8h, %10.h[0] \n"
"smull v22.4s, v5.4h, %10.h[1] \n" // (r01 - r08) * k01
"smull2 v23.4s, v5.8h, %10.h[1] \n"
"smull v24.4s, v6.4h, %10.h[2] \n" // (r02 - r09) * k02
"smull2 v25.4s, v6.8h, %10.h[2] \n"
// r1
"smlal v20.4s, v8.4h, %10.h[3] \n" // (r10 - r17) * k03
"smlal2 v21.4s, v8.8h, %10.h[3] \n"
"smlal v22.4s, v9.4h, %11.h[0] \n" // (r11 - r18) * k04
"smlal2 v23.4s, v9.8h, %11.h[0] \n"
"smlal v24.4s, v10.4h, %11.h[1] \n" // (r12 - r19) * k05
"smlal2 v25.4s, v10.8h, %11.h[1] \n"
// r2
"smlal v20.4s, v12.4h, %11.h[2] \n" // (r20 - r27) * k06
"smlal2 v21.4s, v12.8h, %11.h[2] \n"
"smlal v22.4s, v13.4h, %11.h[3] \n" // (r21 - r28) * k07
"smlal2 v23.4s, v13.8h, %11.h[3] \n"
"smlal v24.4s, v14.4h, %12.h[0] \n" // (r22 - r29) * k08
"smlal2 v25.4s, v14.8h, %12.h[0] \n"
// add and save
"add v20.4s, v20.4s, v22.4s \n"
"add v21.4s, v21.4s, v23.4s \n"
"add v20.4s, v20.4s, v24.4s \n"
"add v21.4s, v21.4s, v25.4s \n"
"st1 {v20.4s, v21.4s}, [%1], #32 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k4567), // %11
"w"(_k8xxx) // %12
: "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25");
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
// r0
"vld2.s8 {d30-d31}, [%2]! \n" // r0
"vld2.s8 {d10-d11}, [%2] \n"
"vext.s8 d12, d30, d10, #1 \n"
"vmovl.s8 q5, d31 \n" // r01
"vmovl.s8 q15, d30 \n" // r00
"vmovl.s8 q6, d12 \n" // r02
// sum0
"vmull.s16 q7, d30, %P10[0] \n" // (r00 - r07) * k00
"vmull.s16 q8, d31, %P10[0] \n"
"vmull.s16 q9, d10, %P10[1] \n" // (r01 - r08) * k01
"vmull.s16 q10, d11, %P10[1] \n"
"vmlal.s16 q7, d12, %P10[2] \n" // (r02 - r09) * k02
"vmlal.s16 q8, d13, %P10[2] \n"
// r1
"vld2.s8 {d30-d31}, [%3]! \n" // r1
"vld2.s8 {d10-d11}, [%3] \n"
"vext.s8 d12, d30, d10, #1 \n"
"vmovl.s8 q5, d31 \n" // r11
"vmovl.s8 q15, d30 \n" // r10
"vmovl.s8 q6, d12 \n" // r12
// sum0
"vmlal.s16 q7, d30, %P10[3] \n" // (r10 - r17) * k03
"vmlal.s16 q8, d31, %P10[3] \n"
"vmlal.s16 q9, d10, %P11[0] \n" // (r11 - r18) * k04
"vmlal.s16 q10, d11, %P11[0] \n"
"vmlal.s16 q7, d12, %P11[1] \n" // (r12 - r19) * k05
"vmlal.s16 q8, d13, %P11[1] \n"
// r2
"vld2.s8 {d30-d31}, [%4]! \n" // r2
"vld2.s8 {d10-d11}, [%4] \n"
"vext.s8 d12, d30, d10, #1 \n"
"vmovl.s8 q5, d31 \n" // r21
"vmovl.s8 q15, d30 \n" // r20
"vmovl.s8 q6, d12 \n" // r22
// sum0
"vmlal.s16 q7, d30, %P11[2] \n" // (r20 - r27) * k06
"vmlal.s16 q8, d31, %P11[2] \n"
"vmlal.s16 q9, d10, %P11[3] \n" // (r21 - r28) * k07
"vmlal.s16 q10, d11, %P11[3] \n"
"vmlal.s16 q7, d12, %P12[0] \n" // (r22 - r29) * k08
"vmlal.s16 q8, d13, %P12[0] \n"
"subs %0, %0, #1 \n"
// add and save
"vadd.s32 q7, q7, q9 \n"
"vadd.s32 q8, q8, q10 \n"
"vst1.s32 {d14-d17}, [%1]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k4567), // %11
"w"(_k8xxx) // %12
: "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
int sum = 0;
sum += (int)r0[0] * kernel[0];
sum += (int)r0[1] * kernel[1];
sum += (int)r0[2] * kernel[2];
sum += (int)r1[0] * kernel[3];
sum += (int)r1[1] * kernel[4];
sum += (int)r1[2] * kernel[5];
sum += (int)r2[0] * kernel[6];
sum += (int)r2[1] * kernel[7];
sum += (int)r2[2] * kernel[8];
*outptr = sum;
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
static void convdw3x3s1_int8_requant_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, std::vector<float> scales_requant, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
const float scale_requant_in = scales_requant[2 * p];
const float scale_requant_out = scales_requant[2 * p + 1];
const signed char* kernel = (const signed char*)_kernel + p * 9;
signed char* outptr0 = out;
signed char* outptr0n = outptr0 + outw;
const signed char* img0 = bottom_blob.channel(p);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
const signed char* r3 = img0 + w * 3;
int i = 0;
#if __ARM_NEON
int8x16_t _k0123456789x = vld1q_s8(kernel);
int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x));
int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x));
int16x4_t _k0123 = vget_low_s16(_k_s16);
int16x4_t _k4567 = vget_high_s16(_k_s16);
int16x4_t _k8xxx = vget_low_s16(_kn_s16);
#endif // __ARM_NEON
for (; i + 1 < outh; i += 2)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"ld1 {v4.8b, v5.8b}, [%3] \n"
"ld1 {v6.8b, v7.8b}, [%4] \n"
"ld1 {v8.8b, v9.8b}, [%5] \n"
"ld1 {v10.8b, v11.8b}, [%6] \n"
"add %3, %3, #8 \n"
"add %4, %4, #8 \n"
"add %5, %5, #8 \n"
"add %6, %6, #8 \n"
"ext v12.8b, v4.8b, v5.8b, #1 \n"
"ext v13.8b, v4.8b, v5.8b, #2 \n"
"ext v14.8b, v6.8b, v7.8b, #1 \n"
"ext v15.8b, v6.8b, v7.8b, #2 \n"
"ext v16.8b, v8.8b, v9.8b, #1 \n"
"ext v17.8b, v8.8b, v9.8b, #2 \n"
"ext v18.8b, v10.8b, v11.8b, #1 \n"
"ext v19.8b, v10.8b, v11.8b, #2 \n"
"sshll v4.8h, v4.8b, #0 \n" // r00
"sshll v12.8h, v12.8b, #0 \n" // r01
"sshll v13.8h, v13.8b, #0 \n" // r02
"sshll v6.8h, v6.8b, #0 \n" // r10
"sshll v14.8h, v14.8b, #0 \n" // r11
"sshll v15.8h, v15.8b, #0 \n" // r12
"sshll v8.8h, v8.8b, #0 \n" // r20
"sshll v16.8h, v16.8b, #0 \n" // r21
"sshll v17.8h, v17.8b, #0 \n" // r22
"sshll v10.8h, v10.8b, #0 \n" // r30
"sshll v18.8h, v18.8b, #0 \n" // r31
"sshll v19.8h, v19.8b, #0 \n" // r32
// r0
"smull v20.4s, v4.4h, %14.h[0] \n" // (r00 - r07) * k00
"smull2 v21.4s, v4.8h, %14.h[0] \n"
"smull v22.4s, v12.4h, %14.h[1] \n" // (r01 - r08) * k01
"smull2 v23.4s, v12.8h, %14.h[1] \n"
"smull v24.4s, v13.4h, %14.h[2] \n" // (r02 - r09) * k02
"smull2 v25.4s, v13.8h, %14.h[2] \n"
// r1
"smull v26.4s, v6.4h, %14.h[0] \n" // (r10 - r17) * k00
"smull2 v27.4s, v6.8h, %14.h[0] \n"
"smull v28.4s, v14.4h, %14.h[1] \n" // (r11 - r18) * k01
"smull2 v29.4s, v14.8h, %14.h[1] \n"
"smull v30.4s, v15.4h, %14.h[2] \n" // (r12 - r19) * k02
"smull2 v31.4s, v15.8h, %14.h[2] \n"
"smlal v20.4s, v6.4h, %14.h[3] \n" // (r10 - r17) * k03
"smlal2 v21.4s, v6.8h, %14.h[3] \n"
"smlal v22.4s, v14.4h, %15.h[0] \n" // (r11 - r18) * k04
"smlal2 v23.4s, v14.8h, %15.h[0] \n"
"smlal v24.4s, v15.4h, %15.h[1] \n" // (r12 - r19) * k05
"smlal2 v25.4s, v15.8h, %15.h[1] \n"
// r2
"smlal v26.4s, v8.4h, %14.h[3] \n" // (r20 - r27) * k03
"smlal2 v27.4s, v8.8h, %14.h[3] \n"
"smlal v28.4s, v16.4h, %15.h[0] \n" // (r21 - r28) * k04
"smlal2 v29.4s, v16.8h, %15.h[0] \n"
"smlal v30.4s, v17.4h, %15.h[1] \n" // (r22 - r29) * k05
"smlal2 v31.4s, v17.8h, %15.h[1] \n"
"smlal v20.4s, v8.4h, %15.h[2] \n" // (r20 - r27) * k06
"smlal2 v21.4s, v8.8h, %15.h[2] \n"
"smlal v22.4s, v16.4h, %15.h[3] \n" // (r21 - r28) * k07
"smlal2 v23.4s, v16.8h, %15.h[3] \n"
"smlal v24.4s, v17.4h, %16.h[0] \n" // (r22 - r29) * k08
"smlal2 v25.4s, v17.8h, %16.h[0] \n"
// r3
"smlal v26.4s, v10.4h, %15.h[2] \n" // (r30 - r37) * k06
"smlal2 v27.4s, v10.8h, %15.h[2] \n"
"smlal v28.4s, v18.4h, %15.h[3] \n" // (r31 - r38) * k07
"smlal2 v29.4s, v18.8h, %15.h[3] \n"
"smlal v30.4s, v19.4h, %16.h[0] \n" // (r32 - r39) * k08
"smlal2 v31.4s, v19.8h, %16.h[0] \n"
// add and save
"add v20.4s, v20.4s, v22.4s \n"
"add v21.4s, v21.4s, v23.4s \n"
"add v26.4s, v26.4s, v28.4s \n"
"add v27.4s, v27.4s, v29.4s \n"
"add v20.4s, v20.4s, v24.4s \n"
"add v21.4s, v21.4s, v25.4s \n"
"add v26.4s, v26.4s, v30.4s \n"
"add v27.4s, v27.4s, v31.4s \n"
"dup v4.4s, %w17 \n" // bias
"dup v5.4s, %w18 \n" // scale_in
"dup v6.4s, %w19 \n" // scale_out
// top_s32 -> top_f32
"scvtf v20.4s, v20.4s \n"
"scvtf v21.4s, v21.4s \n"
"scvtf v26.4s, v26.4s \n"
"scvtf v27.4s, v27.4s \n"
// top_f32 = top_f32 * scale_in
"fmul v20.4s, v20.4s, v5.4s \n"
"fmul v21.4s, v21.4s, v5.4s \n"
"fmul v26.4s, v26.4s, v5.4s \n"
"fmul v27.4s, v27.4s, v5.4s \n"
// top_f32 = top_f32 + bias
"fadd v20.4s, v20.4s, v4.4s \n"
"fadd v21.4s, v21.4s, v4.4s \n"
"fadd v26.4s, v26.4s, v4.4s \n"
"fadd v27.4s, v27.4s, v4.4s \n"
// top_f32 = top_f32 * scale_out
"fmul v20.4s, v20.4s, v6.4s \n"
"fmul v21.4s, v21.4s, v6.4s \n"
"fmul v26.4s, v26.4s, v6.4s \n"
"fmul v27.4s, v27.4s, v6.4s \n"
// top_f32 -> top_s32
"fcvtas v20.4s, v20.4s \n"
"fcvtas v21.4s, v21.4s \n"
"fcvtas v26.4s, v26.4s \n"
"fcvtas v27.4s, v27.4s \n"
// top_s32 -> top_s16
"sqxtn v7.4h, v20.4s \n"
"sqxtn v9.4h, v26.4s \n"
"sqxtn2 v7.8h, v21.4s \n"
"sqxtn2 v9.8h, v27.4s \n"
// top_s16 -> top_s8
"sqxtn v8.8b, v7.8h \n"
"sqxtn v10.8b, v9.8h \n"
// save top_s8
"st1 {v8.8b}, [%1], #8 \n"
"st1 {v10.8b}, [%2], #8 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr0n), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr0n),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k8xxx), // %16
"r"(bias0), // %17
"r"(scale_requant_in), // %18
"r"(scale_requant_out) // %19
: "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
// r0
"vld1.s8 {d30-d31}, [%3] \n" // r0
"add %3, %3, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n" // r00
"vmovl.s8 q5, d10 \n" // r01
"vmovl.s8 q6, d12 \n" // r02
// sum0
"vmull.s16 q7, d30, %P14[0] \n" // (r00 - r07) * k00
"vmull.s16 q8, d31, %P14[0] \n"
"vmull.s16 q9, d10, %P14[1] \n" // (r01 - r08) * k01
"vmull.s16 q10, d11, %P14[1] \n"
"vmlal.s16 q7, d12, %P14[2] \n" // (r02 - r09) * k02
"vmlal.s16 q8, d13, %P14[2] \n"
// r1
"vld1.s8 {d30-d31}, [%4] \n" // r1
"add %4, %4, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n" // r10
"vmovl.s8 q5, d10 \n" // r11
"vmovl.s8 q6, d12 \n" // r12
// sum0
"vmlal.s16 q7, d30, %P14[3] \n" // (r10 - r17) * k03
"vmlal.s16 q8, d31, %P14[3] \n"
"vmlal.s16 q9, d10, %P15[0] \n" // (r11 - r18) * k04
"vmlal.s16 q10, d11, %P15[0] \n"
"vmlal.s16 q7, d12, %P15[1] \n" // (r12 - r19) * k05
"vmlal.s16 q8, d13, %P15[1] \n"
// sum1
"vmull.s16 q11, d30, %P14[0] \n" // (r10 - r17) * k00
"vmull.s16 q12, d31, %P14[0] \n"
"vmull.s16 q13, d10, %P14[1] \n" // (r11 - r18) * k01
"vmull.s16 q14, d11, %P14[1] \n"
"vmlal.s16 q11, d12, %P14[2] \n" // (r12 - r19) * k02
"vmlal.s16 q12, d13, %P14[2] \n"
// r2
"vld1.s8 {d30-d31}, [%5] \n" // r2
"add %5, %5, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n" // r20
"vmovl.s8 q5, d10 \n" // r21
"vmovl.s8 q6, d12 \n" // r22
// sum0
"vmlal.s16 q7, d30, %P15[2] \n" // (r20 - r27) * k06
"vmlal.s16 q8, d31, %P15[2] \n"
"vmlal.s16 q9, d10, %P15[3] \n" // (r21 - r28) * k07
"vmlal.s16 q10, d11, %P15[3] \n"
"vmlal.s16 q7, d12, %P16[0] \n" // (r22 - r29) * k08
"vmlal.s16 q8, d13, %P16[0] \n"
// sum1
"vmlal.s16 q11, d30, %P14[3] \n" // (r20 - r27) * k03
"vmlal.s16 q12, d31, %P14[3] \n"
"vmlal.s16 q13, d10, %P15[0] \n" // (r21 - r28) * k04
"vmlal.s16 q14, d11, %P15[0] \n"
"vmlal.s16 q11, d12, %P15[1] \n" // (r22 - r29) * k05
"vmlal.s16 q12, d13, %P15[1] \n"
// r3
"vld1.s8 {d30-d31}, [%6] \n" // r3
"add %6, %6, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n" // r30
"vmovl.s8 q5, d10 \n" // r31
"vmovl.s8 q6, d12 \n" // r32
// sum1
"vmlal.s16 q11, d30, %P15[2] \n" // (r30 - r37) * k06
"vmlal.s16 q12, d31, %P15[2] \n"
"vmlal.s16 q13, d10, %P15[3] \n" // (r31 - r38) * k07
"vmlal.s16 q14, d11, %P15[3] \n"
"vmlal.s16 q11, d12, %P16[0] \n" // (r32 - r39) * k08
"vmlal.s16 q12, d13, %P16[0] \n"
"subs %0, %0, #1 \n"
// add and save
"vadd.s32 q7, q7, q9 \n"
"vadd.s32 q8, q8, q10 \n"
"vadd.s32 q11, q11, q13 \n"
"vadd.s32 q12, q12, q14 \n"
"vdup.f32 q13, %17 \n" // bias
"vdup.f32 q14, %18 \n" // scale_in
"vdup.f32 q15, %19 \n" // scale_out
// top_s32 -> top_f32
"vcvt.f32.s32 q7, q7 \n"
"vcvt.f32.s32 q8, q8 \n"
// top_f32 = top_f32 * scale_in
"vmul.f32 q0, q7, q14 \n"
"vmul.f32 q4, q8, q14 \n"
// top_f32 = top_f32 + bias
"vadd.f32 q0, q0, q13 \n"
"vadd.f32 q4, q4, q13 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q0, q15 \n"
"vmul.f32 q4, q4, q15 \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s16, s16 \n"
"vcvtr.s32.f32 s17, s17 \n"
"vcvtr.s32.f32 s18, s18 \n"
"vcvtr.s32.f32 s19, s19 \n"
// top_s32 -> top_s16
"vqmovn.s32 d14, q0 \n"
"vqmovn.s32 d15, q4 \n"
// top_s16 -> top_s8
"vqmovn.s16 d14, q7 \n"
// save top_s8
"vst1.8 {d14}, [%1]! \n"
// top_s32 -> top_f32
"vcvt.f32.s32 q11, q11 \n"
"vcvt.f32.s32 q12, q12 \n"
// top_f32 = top_f32 * scale_in
"vmul.f32 q0, q11, q14 \n"
"vmul.f32 q4, q12, q14 \n"
// top_f32 = top_f32 + bias
"vadd.f32 q0, q0, q13 \n"
"vadd.f32 q4, q4, q13 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q0, q15 \n"
"vmul.f32 q4, q4, q15 \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s16, s16 \n"
"vcvtr.s32.f32 s17, s17 \n"
"vcvtr.s32.f32 s18, s18 \n"
"vcvtr.s32.f32 s19, s19 \n"
// top_s32 -> top_s16
"vqmovn.s32 d14, q0 \n"
"vqmovn.s32 d15, q4 \n"
// top_s16 -> top_s8
"vqmovn.s16 d14, q7 \n"
// save top_s8
"vst1.8 {d14}, [%2]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr0n), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr0n),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k8xxx), // %16
"r"(bias0), // %17
"r"(scale_requant_in), // %18
"r"(scale_requant_out) // %19
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
// TODO NEON
int sum0 = 0;
int sum0n = 0;
sum0 += (int)r0[0] * kernel[0];
sum0 += (int)r0[1] * kernel[1];
sum0 += (int)r0[2] * kernel[2];
sum0 += (int)r1[0] * kernel[3];
sum0 += (int)r1[1] * kernel[4];
sum0 += (int)r1[2] * kernel[5];
sum0 += (int)r2[0] * kernel[6];
sum0 += (int)r2[1] * kernel[7];
sum0 += (int)r2[2] * kernel[8];
sum0n += (int)r1[0] * kernel[0];
sum0n += (int)r1[1] * kernel[1];
sum0n += (int)r1[2] * kernel[2];
sum0n += (int)r2[0] * kernel[3];
sum0n += (int)r2[1] * kernel[4];
sum0n += (int)r2[2] * kernel[5];
sum0n += (int)r3[0] * kernel[6];
sum0n += (int)r3[1] * kernel[7];
sum0n += (int)r3[2] * kernel[8];
*outptr0 = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);
*outptr0n = float2int8(((float)sum0n * scale_requant_in + bias0) * scale_requant_out);
r0++;
r1++;
r2++;
r3++;
outptr0++;
outptr0n++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr0 += outw;
outptr0n += outw;
}
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"dup v26.4s, %w13 \n"
"dup v27.4s, %w14 \n"
"dup v28.4s, %w15 \n"
"0: \n"
"ld1 {v4.8b, v5.8b}, [%2] \n"
"ld1 {v6.8b, v7.8b}, [%3] \n"
"ld1 {v8.8b, v9.8b}, [%4] \n"
"add %2, %2, #8 \n"
"add %3, %3, #8 \n"
"add %4, %4, #8 \n"
"ext v12.8b, v4.8b, v5.8b, #1 \n"
"ext v13.8b, v4.8b, v5.8b, #2 \n"
"ext v14.8b, v6.8b, v7.8b, #1 \n"
"ext v15.8b, v6.8b, v7.8b, #2 \n"
"ext v16.8b, v8.8b, v9.8b, #1 \n"
"ext v17.8b, v8.8b, v9.8b, #2 \n"
"sshll v4.8h, v4.8b, #0 \n" // r00
"sshll v12.8h, v12.8b, #0 \n" // r01
"sshll v13.8h, v13.8b, #0 \n" // r02
"sshll v6.8h, v6.8b, #0 \n" // r10
"sshll v14.8h, v14.8b, #0 \n" // r11
"sshll v15.8h, v15.8b, #0 \n" // r12
"sshll v8.8h, v8.8b, #0 \n" // r20
"sshll v16.8h, v16.8b, #0 \n" // r21
"sshll v17.8h, v17.8b, #0 \n" // r22
// r0
"smull v20.4s, v4.4h, %10.h[0] \n" // (r00 - r07) * k00
"smull2 v21.4s, v4.8h, %10.h[0] \n"
"smull v22.4s, v12.4h, %10.h[1] \n" // (r01 - r08) * k01
"smull2 v23.4s, v12.8h, %10.h[1] \n"
"smull v24.4s, v13.4h, %10.h[2] \n" // (r02 - r09) * k02
"smull2 v25.4s, v13.8h, %10.h[2] \n"
// r1
"smlal v20.4s, v6.4h, %10.h[3] \n" // (r10 - r17) * k03
"smlal2 v21.4s, v6.8h, %10.h[3] \n"
"smlal v22.4s, v14.4h, %11.h[0] \n" // (r11 - r18) * k04
"smlal2 v23.4s, v14.8h, %11.h[0] \n"
"smlal v24.4s, v15.4h, %11.h[1] \n" // (r12 - r19) * k05
"smlal2 v25.4s, v15.8h, %11.h[1] \n"
// r2
"smlal v20.4s, v8.4h, %11.h[2] \n" // (r20 - r27) * k06
"smlal2 v21.4s, v8.8h, %11.h[2] \n"
"smlal v22.4s, v16.4h, %11.h[3] \n" // (r21 - r28) * k07
"smlal2 v23.4s, v16.8h, %11.h[3] \n"
"smlal v24.4s, v17.4h, %12.h[0] \n" // (r22 - r29) * k08
"smlal2 v25.4s, v17.8h, %12.h[0] \n"
// add and save
"add v20.4s, v20.4s, v22.4s \n"
"add v21.4s, v21.4s, v23.4s \n"
"add v20.4s, v20.4s, v24.4s \n"
"add v21.4s, v21.4s, v25.4s \n"
// top_s32 -> top_f32
"scvtf v20.4s, v20.4s \n"
"scvtf v21.4s, v21.4s \n"
// top_f32 = top_f32 * scale_in
"fmul v20.4s, v20.4s, v27.4s \n"
"fmul v21.4s, v21.4s, v27.4s \n"
// top_f32 = top_f32 + bias
"fadd v20.4s, v20.4s, v26.4s \n"
"fadd v21.4s, v21.4s, v26.4s \n"
// top_f32 = top_f32 * scale_out
"fmul v20.4s, v20.4s, v28.4s \n"
"fmul v21.4s, v21.4s, v28.4s \n"
// top_f32 -> top_s32
"fcvtas v20.4s, v20.4s \n"
"fcvtas v21.4s, v21.4s \n"
// top_s32 -> top_s16
"sqxtn v7.4h, v20.4s \n"
"sqxtn2 v7.8h, v21.4s \n"
// top_s16 -> top_s8
"sqxtn v8.8b, v7.8h \n"
// save top_s8
"st1 {v8.8b}, [%1], #8 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k4567), // %11
"w"(_k8xxx), // %12
"r"(bias0), // %13
"r"(scale_requant_in), // %14
"r"(scale_requant_out) // %15
: "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
// r0
"vld1.s8 {d30-d31}, [%2] \n" // r0
"add %2, %2, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n" // r00
"vmovl.s8 q5, d10 \n" // r01
"vmovl.s8 q6, d12 \n" // r02
// sum0
"vmull.s16 q7, d30, %P10[0] \n" // (r00 - r07) * k00
"vmull.s16 q8, d31, %P10[0] \n"
"vmull.s16 q9, d10, %P10[1] \n" // (r01 - r08) * k01
"vmull.s16 q10, d11, %P10[1] \n"
"vmlal.s16 q7, d12, %P10[2] \n" // (r02 - r09) * k02
"vmlal.s16 q8, d13, %P10[2] \n"
// r1
"vld1.s8 {d30-d31}, [%3] \n" // r1
"add %3, %3, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n" // r10
"vmovl.s8 q5, d10 \n" // r11
"vmovl.s8 q6, d12 \n" // r12
// sum0
"vmlal.s16 q7, d30, %P10[3] \n" // (r10 - r17) * k03
"vmlal.s16 q8, d31, %P10[3] \n"
"vmlal.s16 q9, d10, %P11[0] \n" // (r11 - r18) * k04
"vmlal.s16 q10, d11, %P11[0] \n"
"vmlal.s16 q7, d12, %P11[1] \n" // (r12 - r19) * k05
"vmlal.s16 q8, d13, %P11[1] \n"
// r2
"vld1.s8 {d30-d31}, [%4] \n" // r2
"add %4, %4, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n" // r20
"vmovl.s8 q5, d10 \n" // r21
"vmovl.s8 q6, d12 \n" // r22
// sum0
"vmlal.s16 q7, d30, %P11[2] \n" // (r20 - r27) * k06
"vmlal.s16 q8, d31, %P11[2] \n"
"vmlal.s16 q9, d10, %P11[3] \n" // (r21 - r28) * k07
"vmlal.s16 q10, d11, %P11[3] \n"
"vmlal.s16 q7, d12, %P12[0] \n" // (r22 - r29) * k08
"vmlal.s16 q8, d13, %P12[0] \n"
"subs %0, %0, #1 \n"
// add and save
"vadd.s32 q7, q7, q9 \n"
"vadd.s32 q8, q8, q10 \n"
"vdup.f32 q13, %13 \n" // bias
"vdup.f32 q14, %14 \n" // scale_in
"vdup.f32 q15, %15 \n" // scale_out
// top_s32 -> top_f32
"vcvt.f32.s32 q7, q7 \n"
"vcvt.f32.s32 q8, q8 \n"
// top_f32 = top_f32 * scale_in
"vmul.f32 q0, q7, q14 \n"
"vmul.f32 q4, q8, q14 \n"
// top_f32 = top_f32 + bias
"vadd.f32 q0, q0, q13 \n"
"vadd.f32 q4, q4, q13 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q0, q15 \n"
"vmul.f32 q4, q4, q15 \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s16, s16 \n"
"vcvtr.s32.f32 s17, s17 \n"
"vcvtr.s32.f32 s18, s18 \n"
"vcvtr.s32.f32 s19, s19 \n"
// top_s32 -> top_s16
"vqmovn.s32 d14, q0 \n"
"vqmovn.s32 d15, q4 \n"
// top_s16 -> top_s8
"vqmovn.s16 d14, q7 \n"
// save top_s8
"vst1.8 {d14}, [%1]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k4567), // %11
"w"(_k8xxx), // %12
"r"(bias0), // %13
"r"(scale_requant_in), // %14
"r"(scale_requant_out) // %15
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
int sum = 0;
sum += (int)r0[0] * kernel[0];
sum += (int)r0[1] * kernel[1];
sum += (int)r0[2] * kernel[2];
sum += (int)r1[0] * kernel[3];
sum += (int)r1[1] * kernel[4];
sum += (int)r1[2] * kernel[5];
sum += (int)r2[0] * kernel[6];
sum += (int)r2[1] * kernel[7];
sum += (int)r2[2] * kernel[8];
*outptr0 = float2int8(((float)sum * scale_requant_in + bias0) * scale_requant_out);
r0++;
r1++;
r2++;
outptr0++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
static void convdw3x3s2_int8_requant_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, std::vector<float> scales_requant, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
const int tailstep = w - 2 * outw + w;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
const float scale_requant_in = scales_requant[2 * p];
const float scale_requant_out = scales_requant[2 * p + 1];
const signed char* kernel = (const signed char*)_kernel + p * 9;
signed char* outptr = out;
const signed char* img = bottom_blob.channel(p);
const signed char* r0 = img;
const signed char* r1 = img + w;
const signed char* r2 = img + w * 2;
int i = 0;
#if __ARM_NEON
int8x16_t _k0123456789x = vld1q_s8(kernel);
int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x));
int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x));
int16x4_t _k0123 = vget_low_s16(_k_s16);
int16x4_t _k4567 = vget_high_s16(_k_s16);
int16x4_t _k8xxx = vget_low_s16(_kn_s16);
#endif // __ARM_NEON
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"dup v26.4s, %w13 \n"
"dup v27.4s, %w14 \n"
"dup v28.4s, %w15 \n"
"0: \n"
"ld2 {v4.8b, v5.8b}, [%2], #16 \n"
"ld2 {v6.8b, v7.8b}, [%2] \n"
"ld2 {v8.8b, v9.8b}, [%3], #16 \n"
"ld2 {v10.8b, v11.8b}, [%3] \n"
"ld2 {v12.8b, v13.8b}, [%4], #16 \n"
"ld2 {v14.8b, v15.8b}, [%4] \n"
"ext v6.8b, v4.8b, v6.8b, #1 \n"
"ext v10.8b, v8.8b, v10.8b, #1 \n"
"ext v14.8b, v12.8b, v14.8b, #1 \n"
"sshll v4.8h, v4.8b, #0 \n" // r00
"sshll v5.8h, v5.8b, #0 \n" // r01
"sshll v6.8h, v6.8b, #0 \n" // r02
"sshll v8.8h, v8.8b, #0 \n" // r10
"sshll v9.8h, v9.8b, #0 \n" // r11
"sshll v10.8h, v10.8b, #0 \n" // r12
"sshll v12.8h, v12.8b, #0 \n" // r20
"sshll v13.8h, v13.8b, #0 \n" // r21
"sshll v14.8h, v14.8b, #0 \n" // r22
// r0
"smull v20.4s, v4.4h, %10.h[0] \n" // (r00 - r07) * k00
"smull2 v21.4s, v4.8h, %10.h[0] \n"
"smull v22.4s, v5.4h, %10.h[1] \n" // (r01 - r08) * k01
"smull2 v23.4s, v5.8h, %10.h[1] \n"
"smull v24.4s, v6.4h, %10.h[2] \n" // (r02 - r09) * k02
"smull2 v25.4s, v6.8h, %10.h[2] \n"
// r1
"smlal v20.4s, v8.4h, %10.h[3] \n" // (r10 - r17) * k03
"smlal2 v21.4s, v8.8h, %10.h[3] \n"
"smlal v22.4s, v9.4h, %11.h[0] \n" // (r11 - r18) * k04
"smlal2 v23.4s, v9.8h, %11.h[0] \n"
"smlal v24.4s, v10.4h, %11.h[1] \n" // (r12 - r19) * k05
"smlal2 v25.4s, v10.8h, %11.h[1] \n"
// r2
"smlal v20.4s, v12.4h, %11.h[2] \n" // (r20 - r27) * k06
"smlal2 v21.4s, v12.8h, %11.h[2] \n"
"smlal v22.4s, v13.4h, %11.h[3] \n" // (r21 - r28) * k07
"smlal2 v23.4s, v13.8h, %11.h[3] \n"
"smlal v24.4s, v14.4h, %12.h[0] \n" // (r22 - r29) * k08
"smlal2 v25.4s, v14.8h, %12.h[0] \n"
// add and save
"add v20.4s, v20.4s, v22.4s \n"
"add v21.4s, v21.4s, v23.4s \n"
"add v20.4s, v20.4s, v24.4s \n"
"add v21.4s, v21.4s, v25.4s \n"
// top_s32 -> top_f32
"scvtf v20.4s, v20.4s \n"
"scvtf v21.4s, v21.4s \n"
// top_f32 = top_f32 * scale_in
"fmul v20.4s, v20.4s, v27.4s \n"
"fmul v21.4s, v21.4s, v27.4s \n"
// top_f32 = top_f32 + bias
"fadd v20.4s, v20.4s, v26.4s \n"
"fadd v21.4s, v21.4s, v26.4s \n"
// top_f32 = top_f32 * scale_out
"fmul v20.4s, v20.4s, v28.4s \n"
"fmul v21.4s, v21.4s, v28.4s \n"
// top_f32 -> top_s32
"fcvtas v20.4s, v20.4s \n"
"fcvtas v21.4s, v21.4s \n"
// top_s32 -> top_s16
"sqxtn v7.4h, v20.4s \n"
"sqxtn2 v7.8h, v21.4s \n"
// top_s16 -> top_s8
"sqxtn v8.8b, v7.8h \n"
// save top_s8
"st1 {v8.8b}, [%1], #8 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k4567), // %11
"w"(_k8xxx), // %12
"r"(bias0), // %13
"r"(scale_requant_in), // %14
"r"(scale_requant_out) // %15
: "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
// r0
"vld2.s8 {d30-d31}, [%2]! \n" // r0
"vld2.s8 {d10-d11}, [%2] \n"
"vext.s8 d12, d30, d10, #1 \n"
"vmovl.s8 q5, d31 \n" // r01
"vmovl.s8 q15, d30 \n" // r00
"vmovl.s8 q6, d12 \n" // r02
// sum0
"vmull.s16 q7, d30, %P10[0] \n" // (r00 - r07) * k00
"vmull.s16 q8, d31, %P10[0] \n"
"vmull.s16 q9, d10, %P10[1] \n" // (r01 - r08) * k01
"vmull.s16 q10, d11, %P10[1] \n"
"vmlal.s16 q7, d12, %P10[2] \n" // (r02 - r09) * k02
"vmlal.s16 q8, d13, %P10[2] \n"
// r1
"vld2.s8 {d30-d31}, [%3]! \n" // r1
"vld2.s8 {d10-d11}, [%3] \n"
"vext.s8 d12, d30, d10, #1 \n"
"vmovl.s8 q5, d31 \n" // r11
"vmovl.s8 q15, d30 \n" // r10
"vmovl.s8 q6, d12 \n" // r12
// sum0
"vmlal.s16 q7, d30, %P10[3] \n" // (r10 - r17) * k03
"vmlal.s16 q8, d31, %P10[3] \n"
"vmlal.s16 q9, d10, %P11[0] \n" // (r11 - r18) * k04
"vmlal.s16 q10, d11, %P11[0] \n"
"vmlal.s16 q7, d12, %P11[1] \n" // (r12 - r19) * k05
"vmlal.s16 q8, d13, %P11[1] \n"
// r2
"vld2.s8 {d30-d31}, [%4]! \n" // r2
"vld2.s8 {d10-d11}, [%4] \n"
"vext.s8 d12, d30, d10, #1 \n"
"vmovl.s8 q5, d31 \n" // r21
"vmovl.s8 q15, d30 \n" // r20
"vmovl.s8 q6, d12 \n" // r22
// sum0
"vmlal.s16 q7, d30, %P11[2] \n" // (r20 - r27) * k06
"vmlal.s16 q8, d31, %P11[2] \n"
"vmlal.s16 q9, d10, %P11[3] \n" // (r21 - r28) * k07
"vmlal.s16 q10, d11, %P11[3] \n"
"vmlal.s16 q7, d12, %P12[0] \n" // (r22 - r29) * k08
"vmlal.s16 q8, d13, %P12[0] \n"
"subs %0, %0, #1 \n"
// add and save
"vadd.s32 q7, q7, q9 \n"
"vadd.s32 q8, q8, q10 \n"
"vdup.f32 q11, %13 \n" // bias
"vdup.f32 q12, %14 \n" // scale_in
"vdup.f32 q13, %15 \n" // scale_out
// top_s32 -> top_f32
"vcvt.f32.s32 q7, q7 \n"
"vcvt.f32.s32 q8, q8 \n"
// top_f32 = top_f32 * scale_in
"vmul.f32 q0, q7, q12 \n"
"vmul.f32 q4, q8, q12 \n"
// top_f32 = top_f32 + bias
"vadd.f32 q0, q0, q11 \n"
"vadd.f32 q4, q4, q11 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q0, q13 \n"
"vmul.f32 q4, q4, q13 \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s16, s16 \n"
"vcvtr.s32.f32 s17, s17 \n"
"vcvtr.s32.f32 s18, s18 \n"
"vcvtr.s32.f32 s19, s19 \n"
// top_s32 -> top_s16
"vqmovn.s32 d14, q0 \n"
"vqmovn.s32 d15, q4 \n"
// top_s16 -> top_s8
"vqmovn.s16 d14, q7 \n"
// save top_s8
"vst1.8 {d14}, [%1]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k4567), // %11
"w"(_k8xxx), // %12
"r"(bias0), // %13
"r"(scale_requant_in), // %14
"r"(scale_requant_out) // %15
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
int sum = 0;
sum += (int)r0[0] * kernel[0];
sum += (int)r0[1] * kernel[1];
sum += (int)r0[2] * kernel[2];
sum += (int)r1[0] * kernel[3];
sum += (int)r1[1] * kernel[4];
sum += (int)r1[2] * kernel[5];
sum += (int)r2[0] * kernel[6];
sum += (int)r2[1] * kernel[7];
sum += (int)r2[2] * kernel[8];
*outptr = float2int8(((float)sum * scale_requant_in + bias0) * scale_requant_out);
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
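// Illustrative note (not ncnn code): why tailstep = w - 2 * outw + w in the
// stride-2 kernel above. After producing one output row, each input row
// pointer has advanced by 2 * outw; the remaining w - 2 * outw elements
// finish the current input row, and one further full row (w) is skipped
// because the 3x3 window moves down two input rows per output row. A scalar
// walk over the same geometry, assuming the w/outw/outh of the function above:
//   const signed char* r = img;
//   for (int i = 0; i < outh; i++)
//   {
//       r += 2 * outw;           // stride 2 along the row
//       r += (w - 2 * outw) + w; // == tailstep, down to the next window row
//   }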
|
GB_binop__isne_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__isne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__isne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_uint16)
// A*D function (colscale): GB (_AxD__isne_uint16)
// D*A function (rowscale): GB (_DxB__isne_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_uint16)
// C=scalar+B GB (_bind1st__isne_uint16)
// C=scalar+B' GB (_bind1st_tran__isne_uint16)
// C=A+scalar GB (_bind2nd__isne_uint16)
// C=A'+scalar GB (_bind2nd_tran__isne_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_UINT16 || GxB_NO_ISNE_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isne_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isne_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isne_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isne_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isne_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isne_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isne_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isne_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isne_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isne_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isne_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isne_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
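// Illustrative sketch (not generated code): the bind1st loop above with the
// GBB/GBX macros expanded for the common non-iso case. GBB (Bb, p) is assumed
// to expand to (Bb == NULL || Bb [p]), and GBX (Bx, p, false) to Bx [p].
static inline void GB_bind1st_isne_uint16_sketch
(
uint16_t *Cx, // output values
uint16_t x, // bound scalar
const uint16_t *Bx, // input values
const int8_t *Bb, // bitmap, or NULL if all entries are present
int64_t bnz
)
{
for (int64_t p = 0 ; p < bnz ; p++)
{
if (Bb != NULL && !Bb [p]) continue ; // entry not present
Cx [p] = (x != Bx [p]) ;
}
}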
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__isne_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__isne_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
fft_params.h | // Copyright (c) 2020 - present Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#ifndef FFT_PARAMS_H
#define FFT_PARAMS_H
#include <algorithm>
#include <complex>
#include <iostream>
#include <mutex>
#include <numeric>
#include <omp.h>
#include <random>
#include <sstream>
#include <stdexcept>
#include <string>
#include <tuple>
#include <vector>
#include "../shared/printbuffer.h"
#include "../shared/ptrdiff.h"
enum fft_status
{
fft_status_success,
fft_status_failure,
fft_status_invalid_arg_value,
fft_status_invalid_dimensions,
fft_status_invalid_array_type,
fft_status_invalid_strides,
fft_status_invalid_distance,
fft_status_invalid_offset,
fft_status_invalid_work_buffer,
};
enum fft_transform_type
{
fft_transform_type_complex_forward,
fft_transform_type_complex_inverse,
fft_transform_type_real_forward,
fft_transform_type_real_inverse,
};
enum fft_precision
{
fft_precision_single,
fft_precision_double,
};
enum fft_array_type
{
fft_array_type_complex_interleaved,
fft_array_type_complex_planar,
fft_array_type_real,
fft_array_type_hermitian_interleaved,
fft_array_type_hermitian_planar,
fft_array_type_unset,
};
enum fft_result_placement
{
fft_placement_inplace,
fft_placement_notinplace,
};
// Determine the size of the data type given the precision and type.
template <typename Tsize>
inline Tsize var_size(const fft_precision precision, const fft_array_type type)
{
size_t var_size = 0;
switch(precision)
{
case fft_precision_single:
var_size = sizeof(float);
break;
case fft_precision_double:
var_size = sizeof(double);
break;
}
switch(type)
{
case fft_array_type_complex_interleaved:
case fft_array_type_hermitian_interleaved:
var_size *= 2;
break;
default:
break;
}
return var_size;
}
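// Examples (illustrative) of the element sizes this returns:
//   var_size<size_t>(fft_precision_single, fft_array_type_real)                == 4
//   var_size<size_t>(fft_precision_double, fft_array_type_real)                == 8
//   var_size<size_t>(fft_precision_single, fft_array_type_complex_interleaved) == 8
//   var_size<size_t>(fft_precision_double, fft_array_type_complex_interleaved) == 16
// Planar types return the per-component size, since each plane stores one real
// component of the complex value.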
// Container class for test parameters.
class fft_params
{
public:
// All parameters are row-major.
std::vector<size_t> length;
std::vector<size_t> istride;
std::vector<size_t> ostride;
size_t nbatch = 1;
fft_precision precision = fft_precision_double;
fft_transform_type transform_type = fft_transform_type_complex_forward;
fft_result_placement placement = fft_placement_inplace;
size_t idist = 0;
size_t odist = 0;
fft_array_type itype = fft_array_type_unset;
fft_array_type otype = fft_array_type_unset;
std::vector<size_t> ioffset = {0, 0};
std::vector<size_t> ooffset = {0, 0};
std::vector<size_t> isize;
std::vector<size_t> osize;
size_t workbuffersize = 0;
// run testing load/store callbacks
bool run_callbacks = false;
static constexpr double load_cb_scalar = 0.457813941;
static constexpr double store_cb_scalar = 0.391504938;
fft_params(){};
virtual ~fft_params(){};
// Given an array type, return the name as a string.
static std::string array_type_name(const fft_array_type type, bool verbose = true)
{
switch(type)
{
case fft_array_type_complex_interleaved:
return verbose ? "fft_array_type_complex_interleaved" : "CI";
case fft_array_type_complex_planar:
return verbose ? "fft_array_type_complex_planar" : "CP";
case fft_array_type_real:
return verbose ? "fft_array_type_real" : "R";
case fft_array_type_hermitian_interleaved:
return verbose ? "fft_array_type_hermitian_interleaved" : "HI";
case fft_array_type_hermitian_planar:
return verbose ? "fft_array_type_hermitian_planar" : "HP";
case fft_array_type_unset:
return verbose ? "fft_array_type_unset" : "UN";
}
return "";
}
std::string transform_type_name() const
{
switch(transform_type)
{
case fft_transform_type_complex_forward:
return "fft_transform_type_complex_forward";
case fft_transform_type_complex_inverse:
return "fft_transform_type_complex_inverse";
case fft_transform_type_real_forward:
return "fft_transform_type_real_forward";
case fft_transform_type_real_inverse:
return "fft_transform_type_real_inverse";
}
throw std::runtime_error("Invalid transform type");
}
// Convert to string for output.
std::string str(const std::string& separator = ", ") const
{
std::stringstream ss;
ss << "length:";
for(auto i : length)
ss << " " << i;
ss << separator;
ss << "istride:";
for(auto i : istride)
ss << " " << i;
ss << separator;
ss << "idist: " << idist << separator;
ss << "ostride:";
for(auto i : ostride)
ss << " " << i;
ss << separator;
ss << "odist: " << odist << separator;
ss << "batch: " << nbatch << separator;
ss << "isize:";
for(auto i : isize)
ss << " " << i;
ss << separator;
ss << "osize:";
for(auto i : osize)
ss << " " << i;
ss << separator;
ss << "ioffset:";
for(auto i : ioffset)
ss << " " << i;
ss << separator;
ss << "ooffset:";
for(auto i : ooffset)
ss << " " << i;
ss << separator;
if(placement == fft_placement_inplace)
ss << "in-place";
else
ss << "out-of-place";
ss << separator;
ss << "transform_type: " << transform_type_name() << separator;
ss << array_type_name(itype) << " -> " << array_type_name(otype) << separator;
if(precision == fft_precision_single)
ss << "single-precision";
else
ss << "double-precision";
ss << separator;
ss << "ilength:";
for(const auto i : ilength())
ss << " " << i;
ss << separator;
ss << "olength:";
for(const auto i : olength())
ss << " " << i;
ss << separator;
ss << "ibuffer_size:";
for(const auto i : ibuffer_sizes())
ss << " " << i;
ss << separator;
ss << "obuffer_size:";
for(const auto i : obuffer_sizes())
ss << " " << i;
ss << separator;
return ss.str();
}
// Produce a stringified token of the test fft params.
std::string token() const
{
std::string ret;
switch(transform_type)
{
case fft_transform_type_complex_forward:
ret += "complex_forward_";
break;
case fft_transform_type_complex_inverse:
ret += "complex_inverse_";
break;
case fft_transform_type_real_forward:
ret += "real_forward_";
break;
case fft_transform_type_real_inverse:
ret += "real_inverse_";
break;
}
ret += "len_";
for(auto n : length)
{
ret += std::to_string(n);
ret += "_";
}
switch(precision)
{
case fft_precision_single:
ret += "single_";
break;
case fft_precision_double:
ret += "double_";
break;
}
switch(placement)
{
case fft_placement_inplace:
ret += "ip_";
break;
case fft_placement_notinplace:
ret += "op_";
break;
}
ret += "batch_";
ret += std::to_string(nbatch);
auto append_array_info = [&ret](const std::vector<size_t>& stride, fft_array_type type) {
for(auto s : stride)
{
ret += std::to_string(s);
ret += "_";
}
switch(type)
{
case fft_array_type_complex_interleaved:
ret += "CI";
break;
case fft_array_type_complex_planar:
ret += "CP";
break;
case fft_array_type_real:
ret += "R";
break;
case fft_array_type_hermitian_interleaved:
ret += "HI";
break;
case fft_array_type_hermitian_planar:
ret += "HP";
break;
default:
ret += "UN";
break;
}
};
ret += "_istride_";
append_array_info(istride, itype);
ret += "_ostride_";
append_array_info(ostride, otype);
ret += "_idist_";
ret += std::to_string(idist);
ret += "_odist_";
ret += std::to_string(odist);
ret += "_ioffset";
for(auto n : ioffset)
{
ret += "_";
ret += std::to_string(n);
}
ret += "_ooffset";
for(auto n : ooffset)
{
ret += "_";
ret += std::to_string(n);
}
if(run_callbacks)
ret += "_CB";
return ret;
}
// Set all params from a stringified token.
void from_token(std::string token)
{
std::vector<std::string> vals;
std::string delimiter = "_";
{
size_t pos = 0;
while((pos = token.find(delimiter)) != std::string::npos)
{
auto val = token.substr(0, pos);
vals.push_back(val);
token.erase(0, pos + delimiter.length());
}
vals.push_back(token);
}
auto vector_parser
= [](const std::vector<std::string>& vals, const std::string token, int& pos) {
if(vals[pos++] != token)
throw std::runtime_error("Unable to parse token");
std::vector<size_t> vec;
while(pos < vals.size())
{
if(std::all_of(vals[pos].begin(), vals[pos].end(), ::isdigit))
{
vec.push_back(std::stoi(vals[pos++]));
}
else
{
break;
}
}
return vec;
};
auto type_parser = [](const std::string& val) {
if(val == "CI")
return fft_array_type_complex_interleaved;
else if(val == "CP")
return fft_array_type_complex_planar;
else if(val == "R")
return fft_array_type_real;
else if(val == "HI")
return fft_array_type_hermitian_interleaved;
else if(val == "HP")
return fft_array_type_hermitian_planar;
return fft_array_type_unset;
};
int pos = 0;
bool complex = vals[pos++] == "complex";
bool forward = vals[pos++] == "forward";
if(complex && forward)
transform_type = fft_transform_type_complex_forward;
if(complex && !forward)
transform_type = fft_transform_type_complex_inverse;
if(!complex && forward)
transform_type = fft_transform_type_real_forward;
if(!complex && !forward)
transform_type = fft_transform_type_real_inverse;
length = vector_parser(vals, "len", pos);
if(vals[pos] == "single")
precision = fft_precision_single;
else if(vals[pos] == "double")
precision = fft_precision_double;
pos++;
placement = (vals[pos++] == "ip") ? fft_placement_inplace : fft_placement_notinplace;
if(vals[pos++] != "batch")
throw std::runtime_error("Unable to parse token");
nbatch = std::stoi(vals[pos++]);
istride = vector_parser(vals, "istride", pos);
itype = type_parser(vals[pos]);
pos++;
ostride = vector_parser(vals, "ostride", pos);
otype = type_parser(vals[pos]);
pos++;
if(vals[pos++] != "idist")
throw std::runtime_error("Unable to parse token");
idist = std::stoi(vals[pos++]);
if(vals[pos++] != "odist")
throw std::runtime_error("Unable to parse token");
odist = std::stoi(vals[pos++]);
ioffset = vector_parser(vals, "ioffset", pos);
ooffset = vector_parser(vals, "ooffset", pos);
if(pos < vals.size())
run_callbacks = vals[pos] == "CB";
}
// Stream output operator (for gtest, etc).
friend std::ostream& operator<<(std::ostream& stream, const fft_params& params)
{
stream << params.str();
return stream;
}
// Dimension of the transform.
size_t dim() const
{
return length.size();
}
std::vector<size_t> ilength() const
{
auto ilength = length;
if(transform_type == fft_transform_type_real_inverse)
ilength[dim() - 1] = ilength[dim() - 1] / 2 + 1;
return ilength;
}
std::vector<size_t> olength() const
{
auto olength = length;
if(transform_type == fft_transform_type_real_forward)
olength[dim() - 1] = olength[dim() - 1] / 2 + 1;
return olength;
}
static size_t nbuffer(const fft_array_type type)
{
switch(type)
{
case fft_array_type_real:
case fft_array_type_complex_interleaved:
case fft_array_type_hermitian_interleaved:
return 1;
case fft_array_type_complex_planar:
case fft_array_type_hermitian_planar:
return 2;
case fft_array_type_unset:
return 0;
}
return 0;
}
// Number of input buffers
size_t nibuffer() const
{
return nbuffer(itype);
}
// Number of output buffers
size_t nobuffer() const
{
return nbuffer(otype);
}
void set_iotypes()
{
if(itype == fft_array_type_unset)
{
switch(transform_type)
{
case fft_transform_type_complex_forward:
case fft_transform_type_complex_inverse:
itype = fft_array_type_complex_interleaved;
break;
case fft_transform_type_real_forward:
itype = fft_array_type_real;
break;
case fft_transform_type_real_inverse:
itype = fft_array_type_hermitian_interleaved;
break;
default:
throw std::runtime_error("Invalid transform type");
}
}
if(otype == fft_array_type_unset)
{
switch(transform_type)
{
case fft_transform_type_complex_forward:
case fft_transform_type_complex_inverse:
otype = fft_array_type_complex_interleaved;
break;
case fft_transform_type_real_forward:
otype = fft_array_type_hermitian_interleaved;
break;
case fft_transform_type_real_inverse:
otype = fft_array_type_real;
break;
default:
throw std::runtime_error("Invalid transform type");
}
}
}
// Check that the input and output types are consistent.
bool check_iotypes() const
{
switch(itype)
{
case fft_array_type_complex_interleaved:
case fft_array_type_complex_planar:
case fft_array_type_hermitian_interleaved:
case fft_array_type_hermitian_planar:
case fft_array_type_real:
break;
default:
throw std::runtime_error("Invalid Input array type format");
}
switch(otype)
{
case fft_array_type_complex_interleaved:
case fft_array_type_complex_planar:
case fft_array_type_hermitian_interleaved:
case fft_array_type_hermitian_planar:
case fft_array_type_real:
break;
default:
throw std::runtime_error("Invalid Input array type format");
}
// Check that format choices are supported
if(transform_type != fft_transform_type_real_forward
&& transform_type != fft_transform_type_real_inverse)
{
if(placement == fft_placement_inplace && itype != otype)
{
throw std::runtime_error(
"In-place transforms must have identical input and output types");
}
}
bool okformat = true;
switch(itype)
{
case fft_array_type_complex_interleaved:
case fft_array_type_complex_planar:
okformat = (otype == fft_array_type_complex_interleaved
|| otype == fft_array_type_complex_planar);
break;
case fft_array_type_hermitian_interleaved:
case fft_array_type_hermitian_planar:
okformat = otype == fft_array_type_real;
break;
case fft_array_type_real:
okformat = (otype == fft_array_type_hermitian_interleaved
|| otype == fft_array_type_hermitian_planar);
break;
default:
throw std::runtime_error("Invalid Input array type format");
}
return okformat;
}
// Given a length vector, set the rest of the strides.
// The optional argument stride0 sets the stride for the contiguous dimension.
// The optional rcpadding argument sets the stride correctly for in-place
// multi-dimensional real/complex transforms.
// Format is row-major.
template <typename T1>
std::vector<T1> compute_stride(const std::vector<T1>& length,
const std::vector<size_t>& stride0 = std::vector<size_t>(),
const bool rcpadding = false) const
{
std::vector<T1> stride(dim());
size_t dimoffset = 0;
if(stride0.size() == 0)
{
// Set the contiguous stride:
stride[dim() - 1] = 1;
dimoffset = 1;
}
else
{
// Copy the input values to the end of the stride array:
for(size_t i = 0; i < stride0.size(); ++i)
{
stride[dim() - stride0.size() + i] = stride0[i];
}
}
if(stride0.size() < dim())
{
// Compute any remaining values via recursion.
for(size_t i = dim() - dimoffset - stride0.size(); i-- > 0;)
{
auto lengthip1 = length[i + 1];
if(rcpadding && i == dim() - 2)
{
lengthip1 = 2 * (lengthip1 / 2 + 1);
}
stride[i] = stride[i + 1] * lengthip1;
}
}
return stride;
}
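// Worked example (illustrative): with length {2, 3, 4}, no stride0, and
// row-major layout, the recursion gives
//   stride = {3 * 4, 4, 1} = {12, 4, 1}.
// With rcpadding == true (in-place multi-dimensional real/complex), the
// fastest dimension is padded to 2 * (n / 2 + 1): length {4, 4} gives
//   stride = {2 * (4 / 2 + 1), 1} = {6, 1}.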
void compute_istride()
{
istride = compute_stride(ilength(),
istride,
placement == fft_placement_inplace
&& transform_type == fft_transform_type_real_forward);
}
void compute_ostride()
{
ostride = compute_stride(olength(),
ostride,
placement == fft_placement_inplace
&& transform_type == fft_transform_type_real_inverse);
}
void compute_isize()
{
auto il = ilength();
size_t val = compute_ptrdiff(il, istride, nbatch, idist);
isize.resize(nibuffer());
for(int i = 0; i < isize.size(); ++i)
{
isize[i] = val + ioffset[i];
}
}
void compute_osize()
{
auto ol = olength();
size_t val = compute_ptrdiff(ol, ostride, nbatch, odist);
osize.resize(nobuffer());
for(int i = 0; i < osize.size(); ++i)
{
osize[i] = val + ooffset[i];
}
}
std::vector<size_t> ibuffer_sizes() const
{
std::vector<size_t> ibuffer_sizes;
// In-place real-to-complex transforms need to have enough space in the input buffer to
// accommodate the output, which is slightly larger.
if(placement == fft_placement_inplace && transform_type == fft_transform_type_real_forward)
{
return obuffer_sizes();
}
if(isize.empty())
return ibuffer_sizes;
switch(itype)
{
case fft_array_type_complex_planar:
case fft_array_type_hermitian_planar:
ibuffer_sizes.resize(2);
break;
default:
ibuffer_sizes.resize(1);
}
for(unsigned i = 0; i < ibuffer_sizes.size(); i++)
{
ibuffer_sizes[i] = isize[i] * var_size<size_t>(precision, itype);
}
return ibuffer_sizes;
}
std::vector<size_t> obuffer_sizes() const
{
std::vector<size_t> obuffer_sizes;
if(osize.empty())
return obuffer_sizes;
switch(otype)
{
case fft_array_type_complex_planar:
case fft_array_type_hermitian_planar:
obuffer_sizes.resize(2);
break;
default:
obuffer_sizes.resize(1);
}
for(unsigned i = 0; i < obuffer_sizes.size(); i++)
{
obuffer_sizes[i] = osize[i] * var_size<size_t>(precision, otype);
}
return obuffer_sizes;
}
// Compute the idist for a given transform based on the placeness, transform type, and data
// layout.
void set_idist()
{
if(idist != 0)
return;
// In-place 1D transforms need extra dist.
if(transform_type == fft_transform_type_real_forward && dim() == 1
&& placement == fft_placement_inplace)
{
idist = 2 * (length[0] / 2 + 1) * istride[0];
return;
}
if(transform_type == fft_transform_type_real_inverse && dim() == 1)
{
idist = (length[0] / 2 + 1) * istride[0];
return;
}
idist = (transform_type == fft_transform_type_real_inverse)
? (length[dim() - 1] / 2 + 1) * istride[dim() - 1]
: length[dim() - 1] * istride[dim() - 1];
for(int i = 0; i < dim() - 1; ++i)
{
idist = std::max(length[i] * istride[i], idist);
}
}
// Compute the odist for a given transform based on the placeness, transform type, and data
// layout. Row-major.
void set_odist()
{
if(odist != 0)
return;
// In-place 1D transforms need extra dist.
if(transform_type == fft_transform_type_real_inverse && dim() == 1
&& placement == fft_placement_inplace)
{
odist = 2 * (length[0] / 2 + 1) * ostride[0];
return;
}
if(transform_type == fft_transform_type_real_forward && dim() == 1)
{
odist = (length[0] / 2 + 1) * ostride[0];
return;
}
odist = (transform_type == fft_transform_type_real_forward)
? (length[dim() - 1] / 2 + 1) * ostride[dim() - 1]
: length[dim() - 1] * ostride[dim() - 1];
for(int i = 0; i < dim() - 1; ++i)
{
odist = std::max(length[i] * ostride[i], odist);
}
}
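// Worked example (illustrative): a 1D real forward transform of length {8}
// with unit strides. In-place, set_idist reserves room for the complex
// output: idist = 2 * (8 / 2 + 1) = 10 reals per batch, and set_odist gives
// odist = 8 / 2 + 1 = 5 complex values per batch. Out-of-place, idist is
// simply 8.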
// Return true if the given GPU parameters would produce a valid transform.
bool valid(const int verbose) const
{
if(ioffset.size() < nibuffer() || ooffset.size() < nobuffer())
return false;
// Check that in-place transforms have the same input and output stride:
if(placement == fft_placement_inplace)
{
const auto stridesize = std::min(istride.size(), ostride.size());
bool samestride = true;
for(int i = 0; i < stridesize; ++i)
{
if(istride[i] != ostride[i])
samestride = false;
}
if((transform_type == fft_transform_type_complex_forward
|| transform_type == fft_transform_type_complex_inverse)
&& !samestride)
{
// In-place transforms require identical input and output strides.
if(verbose)
{
std::cout << "istride:";
for(const auto& i : istride)
std::cout << " " << i;
std::cout << " ostride0:";
for(const auto& i : ostride)
std::cout << " " << i;
std::cout << " differ; skipped for in-place transforms: skipping test"
<< std::endl;
}
return false;
}
if((transform_type == fft_transform_type_complex_forward
|| transform_type == fft_transform_type_complex_inverse)
&& (idist != odist))
{
// In-place transforms require identical distance
if(verbose)
{
std::cout << "idist:" << idist << " odist:" << odist
<< " differ; skipped for in-place transforms: skipping test"
<< std::endl;
}
return false;
}
if((transform_type == fft_transform_type_real_forward
|| transform_type == fft_transform_type_real_inverse)
&& (istride.back() != 1 || ostride.back() != 1))
{
// In-place real/complex transforms require unit strides.
if(verbose)
{
std::cout
<< "istride.back(): " << istride.back()
<< " ostride.back(): " << ostride.back()
<< " must be unitary for in-place real/complex transforms: skipping test"
<< std::endl;
}
return false;
}
if((itype == fft_array_type_complex_interleaved
&& otype == fft_array_type_complex_planar)
|| (itype == fft_array_type_complex_planar
&& otype == fft_array_type_complex_interleaved))
{
if(verbose)
{
std::cout << "In-place c2c transforms require identical io types; skipped.\n";
}
return false;
}
// Check offsets
switch(transform_type)
{
case fft_transform_type_complex_forward:
case fft_transform_type_complex_inverse:
for(int i = 0; i < nibuffer(); ++i)
{
if(ioffset[i] != ooffset[i])
return false;
}
break;
case fft_transform_type_real_forward:
if(ioffset[0] != 2 * ooffset[0])
return false;
break;
case fft_transform_type_real_inverse:
if(2 * ioffset[0] != ooffset[0])
return false;
break;
}
}
if(!check_iotypes())
return false;
// The parameters are valid.
return true;
}
// Fill in any missing parameters.
void validate()
{
set_iotypes();
compute_istride();
compute_ostride();
set_idist();
set_odist();
compute_isize();
compute_osize();
}
// Column-major getters:
std::vector<size_t> length_cm() const
{
auto length_cm = length;
std::reverse(std::begin(length_cm), std::end(length_cm));
return length_cm;
}
std::vector<size_t> istride_cm() const
{
auto istride_cm = istride;
std::reverse(std::begin(istride_cm), std::end(istride_cm));
return istride_cm;
}
std::vector<size_t> ostride_cm() const
{
auto ostride_cm = ostride;
std::reverse(std::begin(ostride_cm), std::end(ostride_cm));
return ostride_cm;
}
template <typename Tallocator, typename Tstream = std::ostream>
void print_ibuffer(const std::vector<std::vector<char, Tallocator>>& buf,
Tstream& stream = std::cout) const
{
switch(itype)
{
case fft_array_type_complex_interleaved:
case fft_array_type_hermitian_interleaved:
{
switch(precision)
{
case fft_precision_single:
{
buffer_printer<std::complex<float>> s;
s.print_buffer(buf, ilength(), istride, nbatch, idist, ioffset);
break;
}
case fft_precision_double:
{
buffer_printer<std::complex<double>> s;
s.print_buffer(buf, ilength(), istride, nbatch, idist, ioffset);
break;
}
}
break;
}
case fft_array_type_complex_planar:
case fft_array_type_hermitian_planar:
case fft_array_type_real:
{
switch(precision)
{
case fft_precision_single:
{
buffer_printer<float> s;
s.print_buffer(buf, ilength(), istride, nbatch, idist, ioffset);
break;
}
case fft_precision_double:
{
buffer_printer<double> s;
s.print_buffer(buf, ilength(), istride, nbatch, idist, ioffset);
break;
}
}
break;
}
default:
throw std::runtime_error("Invalid itype in print_ibuffer");
}
}
template <typename Tallocator, typename Tstream = std::ostream>
void print_obuffer(const std::vector<std::vector<char, Tallocator>>& buf,
Tstream& stream = std::cout) const
{
switch(otype)
{
case fft_array_type_complex_interleaved:
case fft_array_type_hermitian_interleaved:
{
switch(precision)
{
case fft_precision_single:
{
buffer_printer<std::complex<float>> s;
s.print_buffer(buf, olength(), ostride, nbatch, odist, ooffset);
break;
}
case fft_precision_double:
{
buffer_printer<std::complex<double>> s;
s.print_buffer(buf, olength(), ostride, nbatch, odist, ooffset);
break;
}
}
break;
}
case fft_array_type_complex_planar:
case fft_array_type_hermitian_planar:
case fft_array_type_real:
{
switch(precision)
{
case fft_precision_single:
{
buffer_printer<float> s;
s.print_buffer(buf, olength(), ostride, nbatch, odist, ooffset);
break;
}
case fft_precision_double:
{
buffer_printer<double> s;
s.print_buffer(buf, olength(), ostride, nbatch, odist, ooffset);
break;
}
}
break;
}
default:
throw std::runtime_error("Invalid itype in print_obuffer");
}
}
template <typename Tallocator>
void print_ibuffer_flat(const std::vector<std::vector<char, Tallocator>>& buf) const
{
switch(itype)
{
case fft_array_type_complex_interleaved:
case fft_array_type_hermitian_interleaved:
{
switch(precision)
{
case fft_precision_single:
{
buffer_printer<std::complex<float>> s;
s.print_buffer_flat(buf, isize, ioffset);
break;
}
case fft_precision_double:
{
buffer_printer<std::complex<double>> s;
s.print_buffer_flat(buf, isize, ioffset);
break;
}
}
break;
}
case fft_array_type_complex_planar:
case fft_array_type_hermitian_planar:
case fft_array_type_real:
{
switch(precision)
{
case fft_precision_single:
{
buffer_printer<float> s;
s.print_buffer_flat(buf, isize, ioffset);
break;
}
case fft_precision_double:
{
buffer_printer<double> s;
s.print_buffer_flat(buf, isize, ioffset);
break;
}
}
break;
}
default:
throw std::runtime_error("Invalid itype in print_ibuffer_flat");
}
}
template <typename Tallocator>
void print_obuffer_flat(const std::vector<std::vector<char, Tallocator>>& buf) const
{
switch(otype)
{
case fft_array_type_complex_interleaved:
case fft_array_type_hermitian_interleaved:
{
switch(precision)
{
case fft_precision_single:
{
buffer_printer<std::complex<float>> s;
s.print_buffer_flat(buf, osize, ooffset);
break;
}
case fft_precision_double:
{
buffer_printer<std::complex<double>> s;
s.print_buffer_flat(buf, osize, ooffset);
break;
}
}
break;
}
case fft_array_type_complex_planar:
case fft_array_type_hermitian_planar:
case fft_array_type_real:
{
switch(precision)
{
case fft_precision_single:
{
buffer_printer<float> s;
s.print_buffer_flat(buf, osize, ooffset);
break;
}
case fft_precision_double:
{
buffer_printer<double> s;
s.print_buffer_flat(buf, osize, ooffset);
break;
}
}
break;
}
default:
throw std::runtime_error("Invalid otype in print_obuffer_flat");
}
}
virtual fft_status set_callbacks(void* load_cb_host,
void* load_cb_data,
void* store_cb_host,
void* store_cb_data)
{
return fft_status_success;
}
virtual fft_status execute(void** in, void** out)
{
return fft_status_success;
};
size_t fft_params_vram_footprint()
{
return fft_params::vram_footprint();
}
virtual size_t vram_footprint()
{
const auto ibuf_size = ibuffer_sizes();
size_t val = std::accumulate(ibuf_size.begin(), ibuf_size.end(), (size_t)1);
if(placement == fft_placement_notinplace)
{
const auto obuf_size = obuffer_sizes();
val += std::accumulate(obuf_size.begin(), obuf_size.end(), (size_t)1);
}
return val;
}
// Specific exception type for work buffer allocation failure.
// Tests that hit this can't fit on the GPU and should be skipped.
struct work_buffer_alloc_failure : public std::runtime_error
{
work_buffer_alloc_failure(const std::string& s)
: std::runtime_error(s)
{
}
};
virtual fft_status create_plan()
{
return fft_status_success;
}
};
// This is used with the program_options class so that the user can type an integer on the
// command line and we store it into an enum variable.
template <typename _Elem, typename _Traits>
std::basic_istream<_Elem, _Traits>& operator>>(std::basic_istream<_Elem, _Traits>& stream,
fft_array_type& atype)
{
unsigned tmp;
stream >> tmp;
atype = fft_array_type(tmp);
return stream;
}
// Similarly for the transform type.
template <typename _Elem, typename _Traits>
std::basic_istream<_Elem, _Traits>& operator>>(std::basic_istream<_Elem, _Traits>& stream,
fft_transform_type& ttype)
{
unsigned tmp;
stream >> tmp;
ttype = fft_transform_type(tmp);
return stream;
}
// Count the total number of iterations for 1-, 2-, and 3-D lengths.
template <typename T1>
size_t count_iters(const T1& i)
{
return i;
}
template <typename T1>
size_t count_iters(const std::tuple<T1, T1>& i)
{
return std::get<0>(i) * std::get<1>(i);
}
template <typename T1>
size_t count_iters(const std::tuple<T1, T1, T1>& i)
{
return std::get<0>(i) * std::get<1>(i) * std::get<2>(i);
}
// Work out how many partitions to break our iteration problem into
template <typename T1>
static size_t compute_partition_count(T1 length)
{
#ifdef BUILD_CLIENTS_TESTS_OPENMP
// We seem to get contention from too many threads, which slows
// things down; it is particularly noticeable with the mix_3D tests.
static const size_t MAX_PARTITIONS = 8;
size_t iters = count_iters(length);
size_t hw_threads = std::min(MAX_PARTITIONS, static_cast<size_t>(omp_get_num_procs()));
if(!hw_threads)
return 1;
// Don't bother threading problem sizes that are too small: pick
// an arbitrary number of iterations and ensure that each thread
// has at least that many iterations to process.
static const size_t MIN_ITERS_PER_THREAD = 2048;
// Either use the whole CPU, or ceil(iters / MIN_ITERS_PER_THREAD) threads.
return std::min(hw_threads, (iters + MIN_ITERS_PER_THREAD - 1) / MIN_ITERS_PER_THREAD);
#else
return 1;
#endif
}
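// Example (illustrative), assuming BUILD_CLIENTS_TESTS_OPENMP and at least 8
// hardware threads:
//   compute_partition_count(size_t(16))                 == 1  (16 iters is below MIN_ITERS_PER_THREAD)
//   compute_partition_count(std::make_tuple(256, 256))  == 8  (65536 iters, capped at MAX_PARTITIONS)
// Without the OpenMP build flag, this always returns 1.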
// Break a scalar length into some number of pieces, returning
// [(start0, end0), (start1, end1), ...]
template <typename T1>
std::vector<std::pair<T1, T1>> partition_base(const T1& length, size_t num_parts)
{
static_assert(std::is_integral<T1>::value, "Integral required.");
// make sure we don't exceed the length
num_parts = std::min(length, num_parts);
std::vector<std::pair<T1, T1>> ret(num_parts);
auto partition_size = length / num_parts;
T1 cur_partition = 0;
for(size_t i = 0; i < num_parts; ++i, cur_partition += partition_size)
{
ret[i].first = cur_partition;
ret[i].second = cur_partition + partition_size;
}
// last partition might not divide evenly, fix it up
ret.back().second = length;
return ret;
}
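// Example (illustrative): partition_base((size_t)10, 3) yields
//   {(0, 3), (3, 6), (6, 10)}
// since partition_size = 10 / 3 = 3 and the final pair is extended to cover
// the remainder.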
// Returns pairs of startindex, endindex, for 1D, 2D, 3D lengths
template <typename T1>
std::vector<std::pair<T1, T1>> partition_rowmajor(const T1& length)
{
return partition_base(length, compute_partition_count(length));
}
// Partition on the leftmost part of the tuple, for row-major indexing
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>>
partition_rowmajor(const std::tuple<T1, T1>& length)
{
auto partitions = partition_base(std::get<0>(length), compute_partition_count(length));
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>> ret(partitions.size());
for(size_t i = 0; i < partitions.size(); ++i)
{
std::get<0>(ret[i].first) = partitions[i].first;
std::get<1>(ret[i].first) = 0;
std::get<0>(ret[i].second) = partitions[i].second;
std::get<1>(ret[i].second) = std::get<1>(length);
}
return ret;
}
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>>
partition_rowmajor(const std::tuple<T1, T1, T1>& length)
{
auto partitions = partition_base(std::get<0>(length), compute_partition_count(length));
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>> ret(partitions.size());
for(size_t i = 0; i < partitions.size(); ++i)
{
std::get<0>(ret[i].first) = partitions[i].first;
std::get<1>(ret[i].first) = 0;
std::get<2>(ret[i].first) = 0;
std::get<0>(ret[i].second) = partitions[i].second;
std::get<1>(ret[i].second) = std::get<1>(length);
std::get<2>(ret[i].second) = std::get<2>(length);
}
return ret;
}
// Returns pairs of startindex, endindex, for 1D, 2D, 3D lengths
template <typename T1>
std::vector<std::pair<T1, T1>> partition_colmajor(const T1& length)
{
return partition_base(length, compute_partition_count(length));
}
// Partition on the rightmost part of the tuple, for col-major indexing
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>>
partition_colmajor(const std::tuple<T1, T1>& length)
{
auto partitions = partition_base(std::get<1>(length), compute_partition_count(length));
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>> ret(partitions.size());
for(size_t i = 0; i < partitions.size(); ++i)
{
std::get<1>(ret[i].first) = partitions[i].first;
std::get<0>(ret[i].first) = 0;
std::get<1>(ret[i].second) = partitions[i].second;
std::get<0>(ret[i].second) = std::get<0>(length);
}
return ret;
}
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>>
partition_colmajor(const std::tuple<T1, T1, T1>& length)
{
auto partitions = partition_base(std::get<2>(length), compute_partition_count(length));
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>> ret(partitions.size());
for(size_t i = 0; i < partitions.size(); ++i)
{
std::get<2>(ret[i].first) = partitions[i].first;
std::get<1>(ret[i].first) = 0;
std::get<0>(ret[i].first) = 0;
std::get<2>(ret[i].second) = partitions[i].second;
std::get<1>(ret[i].second) = std::get<1>(length);
std::get<0>(ret[i].second) = std::get<0>(length);
}
return ret;
}
// Specialized computation of index given 1-, 2-, 3- dimension length + stride
template <typename T1, typename T2>
size_t compute_index(T1 length, T2 stride, size_t base)
{
static_assert(std::is_integral<T1>::value, "Integral required.");
static_assert(std::is_integral<T2>::value, "Integral required.");
return (length * stride) + base;
}
template <typename T1, typename T2>
size_t
compute_index(const std::tuple<T1, T1>& length, const std::tuple<T2, T2>& stride, size_t base)
{
static_assert(std::is_integral<T1>::value, "Integral required.");
static_assert(std::is_integral<T2>::value, "Integral required.");
return (std::get<0>(length) * std::get<0>(stride)) + (std::get<1>(length) * std::get<1>(stride))
+ base;
}
template <typename T1, typename T2>
size_t compute_index(const std::tuple<T1, T1, T1>& length,
const std::tuple<T2, T2, T2>& stride,
size_t base)
{
static_assert(std::is_integral<T1>::value, "Integral required.");
static_assert(std::is_integral<T2>::value, "Integral required.");
return (std::get<0>(length) * std::get<0>(stride)) + (std::get<1>(length) * std::get<1>(stride))
+ (std::get<2>(length) * std::get<2>(stride)) + base;
}
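// Example (illustrative): for a 2D index (1, 2) with row-major strides (4, 1)
// and base offset 3,
//   compute_index(std::make_tuple(1, 2), std::make_tuple(4, 1), 3)
//     == 1 * 4 + 2 * 1 + 3 == 9.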
// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches. The input and output
// types are identical.
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_1to1(const Tval* input,
Tval* output,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const auto odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
output[odx + ooffset[0]] = input[idx + ioffset[0]];
} while(increment_rowmajor(index, length));
}
}
}
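// Usage sketch (illustrative): copying two batches of 4 contiguous floats
// between identically laid out buffers. With istride == ostride and
// idist == odist, idx_equals_odx is true and the copy degenerates to an
// elementwise assignment:
//   std::vector<float> in(8, 1.0f), out(8);
//   copy_buffers_1to1(in.data(), out.data(),
//                     (size_t)4, /* nbatch */ (size_t)2,
//                     (size_t)1, /* idist */ (size_t)4,
//                     (size_t)1, /* odist */ (size_t)4,
//                     {0}, {0});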
// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches. The input type is
// planar and the output type is complex interleaved.
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_2to1(const Tval* input0,
const Tval* input1,
std::complex<Tval>* output,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const auto odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
output[odx + ooffset[0]]
= std::complex<Tval>(input0[idx + ioffset[0]], input1[idx + ioffset[1]]);
} while(increment_rowmajor(index, length));
}
}
}
// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches. The input type is
// complex interleaved and the output type is planar.
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_1to2(const std::complex<Tval>* input,
Tval* output0,
Tval* output1,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const auto odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
output0[odx + ooffset[0]] = input[idx + ioffset[0]].real();
output1[odx + ooffset[1]] = input[idx + ioffset[0]].imag();
} while(increment_rowmajor(index, length));
}
}
}
// Copy data of dimensions length with strides istride and distance idist between batches to
// a buffer with strides ostride and distance odist between batches. The input type is given
// by itype, and the output type is given by otype.
template <typename Tallocator1,
typename Tallocator2,
typename Tint1,
typename Tint2,
typename Tint3>
inline void copy_buffers(const std::vector<std::vector<char, Tallocator1>>& input,
std::vector<std::vector<char, Tallocator2>>& output,
const Tint1& length,
const size_t nbatch,
const fft_precision precision,
const fft_array_type itype,
const Tint2& istride,
const size_t idist,
const fft_array_type otype,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
if(itype == otype)
{
switch(itype)
{
case fft_array_type_complex_interleaved:
case fft_array_type_hermitian_interleaved:
switch(precision)
{
case fft_precision_single:
copy_buffers_1to1(reinterpret_cast<const std::complex<float>*>(input[0].data()),
reinterpret_cast<std::complex<float>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case fft_precision_double:
copy_buffers_1to1(reinterpret_cast<const std::complex<double>*>(input[0].data()),
reinterpret_cast<std::complex<double>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
break;
case fft_array_type_real:
case fft_array_type_complex_planar:
case fft_array_type_hermitian_planar:
        for(size_t idx = 0; idx < input.size(); ++idx)
{
switch(precision)
{
case fft_precision_single:
copy_buffers_1to1(reinterpret_cast<const float*>(input[idx].data()),
reinterpret_cast<float*>(output[idx].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case fft_precision_double:
copy_buffers_1to1(reinterpret_cast<const double*>(input[idx].data()),
reinterpret_cast<double*>(output[idx].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
}
break;
default:
throw std::runtime_error("Invalid data type");
}
}
else if((itype == fft_array_type_complex_interleaved && otype == fft_array_type_complex_planar)
|| (itype == fft_array_type_hermitian_interleaved
&& otype == fft_array_type_hermitian_planar))
{
// copy 1to2
switch(precision)
{
case fft_precision_single:
copy_buffers_1to2(reinterpret_cast<const std::complex<float>*>(input[0].data()),
reinterpret_cast<float*>(output[0].data()),
reinterpret_cast<float*>(output[1].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case fft_precision_double:
copy_buffers_1to2(reinterpret_cast<const std::complex<double>*>(input[0].data()),
reinterpret_cast<double*>(output[0].data()),
reinterpret_cast<double*>(output[1].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
}
else if((itype == fft_array_type_complex_planar && otype == fft_array_type_complex_interleaved)
|| (itype == fft_array_type_hermitian_planar
&& otype == fft_array_type_hermitian_interleaved))
{
// copy 2 to 1
switch(precision)
{
case fft_precision_single:
copy_buffers_2to1(reinterpret_cast<const float*>(input[0].data()),
reinterpret_cast<const float*>(input[1].data()),
reinterpret_cast<std::complex<float>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case fft_precision_double:
copy_buffers_2to1(reinterpret_cast<const double*>(input[0].data()),
reinterpret_cast<const double*>(input[1].data()),
reinterpret_cast<std::complex<double>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
}
else
{
throw std::runtime_error("Invalid input and output types.");
}
}
// unroll arbitrary-dimension copy_buffers into specializations for 1-, 2-, 3-dimensions
template <typename Tallocator1,
typename Tallocator2,
typename Tint1,
typename Tint2,
typename Tint3>
inline void copy_buffers(const std::vector<std::vector<char, Tallocator1>>& input,
std::vector<std::vector<char, Tallocator2>>& output,
const std::vector<Tint1>& length,
const size_t nbatch,
const fft_precision precision,
const fft_array_type itype,
const std::vector<Tint2>& istride,
const size_t idist,
const fft_array_type otype,
const std::vector<Tint3>& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
switch(length.size())
{
case 1:
return copy_buffers(input,
output,
length[0],
nbatch,
precision,
itype,
istride[0],
idist,
otype,
ostride[0],
odist,
ioffset,
ooffset);
case 2:
return copy_buffers(input,
output,
std::make_tuple(length[0], length[1]),
nbatch,
precision,
itype,
std::make_tuple(istride[0], istride[1]),
idist,
otype,
std::make_tuple(ostride[0], ostride[1]),
odist,
ioffset,
ooffset);
case 3:
return copy_buffers(input,
output,
std::make_tuple(length[0], length[1], length[2]),
nbatch,
precision,
itype,
std::make_tuple(istride[0], istride[1], istride[2]),
idist,
otype,
std::make_tuple(ostride[0], ostride[1], ostride[2]),
odist,
ioffset,
ooffset);
default:
abort();
}
}
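// Illustrative sketch (not part of the original header): convert one batch of
// contiguous, row-major 8x8 single-precision data from interleaved to planar
// layout using the vector-based copy_buffers overload above.  All sizes and
// names here are hypothetical.
inline void example_copy_interleaved_to_planar()
{
    const std::vector<size_t> length{8, 8};
    const std::vector<size_t> stride{8, 1}; // row-major, contiguous
    std::vector<std::vector<char>>  in(1), out(2);
    in[0].resize(64 * sizeof(std::complex<float>));
    out[0].resize(64 * sizeof(float)); // real parts
    out[1].resize(64 * sizeof(float)); // imaginary parts
    copy_buffers(in,
                 out,
                 length,
                 1,
                 fft_precision_single,
                 fft_array_type_complex_interleaved,
                 stride,
                 64,
                 fft_array_type_complex_planar,
                 stride,
                 64,
                 {0},
                 {0, 0});
}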
// Compute the L-infinity and L-2 distance between two buffers: one with strides istride
// and distance idist between batches, and one with strides ostride and distance odist
// between batches. Both buffers are of complex type.
struct VectorNorms
{
double l_2 = 0.0, l_inf = 0.0;
};
template <typename Tcomplex, typename Tint1, typename Tint2, typename Tint3>
inline VectorNorms distance_1to1_complex(const Tcomplex* input,
const Tcomplex* output,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
std::vector<std::pair<size_t, size_t>>& linf_failures,
const double linf_cutoff,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
double linf = 0.0;
double l2 = 0.0;
std::mutex linf_failure_lock;
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_colmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
double cur_linf = 0.0;
double cur_l2 = 0.0;
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const auto odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
const double rdiff
= std::abs(output[odx + ooffset[0]].real() - input[idx + ioffset[0]].real());
cur_linf = std::max(rdiff, cur_linf);
if(cur_linf > linf_cutoff)
{
std::pair<size_t, size_t> fval(b, idx);
linf_failure_lock.lock();
linf_failures.push_back(fval);
linf_failure_lock.unlock();
}
cur_l2 += rdiff * rdiff;
const double idiff
= std::abs(output[odx + ooffset[0]].imag() - input[idx + ioffset[0]].imag());
cur_linf = std::max(idiff, cur_linf);
if(cur_linf > linf_cutoff)
{
std::pair<size_t, size_t> fval(b, idx);
linf_failure_lock.lock();
linf_failures.push_back(fval);
linf_failure_lock.unlock();
}
cur_l2 += idiff * idiff;
} while(increment_rowmajor(index, length));
linf = std::max(linf, cur_linf);
l2 += cur_l2;
}
}
return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 distance between two buffers: one with strides istride
// and distance idist between batches, and one with strides ostride and distance odist
// between batches. Both buffers are of real type.
template <typename Tfloat, typename Tint1, typename Tint2, typename Tint3>
inline VectorNorms distance_1to1_real(const Tfloat* input,
const Tfloat* output,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
std::vector<std::pair<size_t, size_t>>& linf_failures,
const double linf_cutoff,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
double linf = 0.0;
double l2 = 0.0;
std::mutex linf_failure_lock;
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
double cur_linf = 0.0;
double cur_l2 = 0.0;
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const auto odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
const double diff = std::abs(output[odx + ooffset[0]] - input[idx + ioffset[0]]);
cur_linf = std::max(diff, cur_linf);
if(cur_linf > linf_cutoff)
{
std::pair<size_t, size_t> fval(b, idx);
linf_failure_lock.lock();
linf_failures.push_back(fval);
linf_failure_lock.unlock();
}
cur_l2 += diff * diff;
} while(increment_rowmajor(index, length));
linf = std::max(linf, cur_linf);
l2 += cur_l2;
}
}
return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 distance between two buffers: one with strides istride
// and distance idist between batches, and one with strides ostride and distance odist
// between batches. input is complex-interleaved, output is complex-planar.
template <typename Tval, typename Tint1, typename T2, typename T3>
inline VectorNorms distance_1to2(const std::complex<Tval>* input,
const Tval* output0,
const Tval* output1,
const Tint1& whole_length,
const size_t nbatch,
const T2& istride,
const size_t idist,
const T3& ostride,
const size_t odist,
std::vector<std::pair<size_t, size_t>>& linf_failures,
const double linf_cutoff,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
double linf = 0.0;
double l2 = 0.0;
std::mutex linf_failure_lock;
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
double cur_linf = 0.0;
double cur_l2 = 0.0;
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const auto odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
const double rdiff
= std::abs(output0[odx + ooffset[0]] - input[idx + ioffset[0]].real());
cur_linf = std::max(rdiff, cur_linf);
if(cur_linf > linf_cutoff)
{
std::pair<size_t, size_t> fval(b, idx);
linf_failure_lock.lock();
linf_failures.push_back(fval);
linf_failure_lock.unlock();
}
cur_l2 += rdiff * rdiff;
const double idiff
= std::abs(output1[odx + ooffset[1]] - input[idx + ioffset[0]].imag());
cur_linf = std::max(idiff, cur_linf);
if(cur_linf > linf_cutoff)
{
std::pair<size_t, size_t> fval(b, idx);
linf_failure_lock.lock();
linf_failures.push_back(fval);
linf_failure_lock.unlock();
}
cur_l2 += idiff * idiff;
} while(increment_rowmajor(index, length));
linf = std::max(linf, cur_linf);
l2 += cur_l2;
}
}
return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 distance between two buffers of dimension length and
// with types given by itype, otype, and precision.
template <typename Tallocator1,
typename Tallocator2,
typename Tint1,
typename Tint2,
typename Tint3>
inline VectorNorms distance(const std::vector<std::vector<char, Tallocator1>>& input,
const std::vector<std::vector<char, Tallocator2>>& output,
const Tint1& length,
const size_t nbatch,
const fft_precision precision,
const fft_array_type itype,
const Tint2& istride,
const size_t idist,
const fft_array_type otype,
const Tint3& ostride,
const size_t odist,
std::vector<std::pair<size_t, size_t>>& linf_failures,
const double linf_cutoff,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
VectorNorms dist;
if(itype == otype)
{
switch(itype)
{
case fft_array_type_complex_interleaved:
case fft_array_type_hermitian_interleaved:
switch(precision)
{
case fft_precision_single:
dist = distance_1to1_complex(
reinterpret_cast<const std::complex<float>*>(input[0].data()),
reinterpret_cast<const std::complex<float>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
case fft_precision_double:
dist = distance_1to1_complex(
reinterpret_cast<const std::complex<double>*>(input[0].data()),
reinterpret_cast<const std::complex<double>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
}
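            // distance_1to1_complex returns an L-2 norm (already square-rooted);
            // square it so per-component results can be accumulated, with a single
            // sqrt applied at the end of this function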
dist.l_2 *= dist.l_2;
break;
case fft_array_type_real:
case fft_array_type_complex_planar:
case fft_array_type_hermitian_planar:
        for(size_t idx = 0; idx < input.size(); ++idx)
{
VectorNorms d;
switch(precision)
{
case fft_precision_single:
d = distance_1to1_real(reinterpret_cast<const float*>(input[idx].data()),
reinterpret_cast<const float*>(output[idx].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
case fft_precision_double:
d = distance_1to1_real(reinterpret_cast<const double*>(input[idx].data()),
reinterpret_cast<const double*>(output[idx].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
}
dist.l_inf = std::max(d.l_inf, dist.l_inf);
dist.l_2 += d.l_2 * d.l_2;
}
break;
default:
throw std::runtime_error("Invalid input and output types.");
}
}
else if((itype == fft_array_type_complex_interleaved && otype == fft_array_type_complex_planar)
|| (itype == fft_array_type_hermitian_interleaved
&& otype == fft_array_type_hermitian_planar))
{
switch(precision)
{
case fft_precision_single:
dist = distance_1to2(reinterpret_cast<const std::complex<float>*>(input[0].data()),
reinterpret_cast<const float*>(output[0].data()),
reinterpret_cast<const float*>(output[1].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
case fft_precision_double:
dist = distance_1to2(reinterpret_cast<const std::complex<double>*>(input[0].data()),
reinterpret_cast<const double*>(output[0].data()),
reinterpret_cast<const double*>(output[1].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
}
dist.l_2 *= dist.l_2;
}
else if((itype == fft_array_type_complex_planar && otype == fft_array_type_complex_interleaved)
|| (itype == fft_array_type_hermitian_planar
&& otype == fft_array_type_hermitian_interleaved))
{
switch(precision)
{
case fft_precision_single:
dist = distance_1to2(reinterpret_cast<const std::complex<float>*>(output[0].data()),
reinterpret_cast<const float*>(input[0].data()),
reinterpret_cast<const float*>(input[1].data()),
length,
nbatch,
ostride,
odist,
istride,
idist,
linf_failures,
linf_cutoff,
                                 ooffset, // input/output roles are swapped in this call,
                                 ioffset); // so the offsets swap along with strides/dists
break;
case fft_precision_double:
dist = distance_1to2(reinterpret_cast<const std::complex<double>*>(output[0].data()),
reinterpret_cast<const double*>(input[0].data()),
reinterpret_cast<const double*>(input[1].data()),
length,
nbatch,
ostride,
odist,
istride,
idist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
}
dist.l_2 *= dist.l_2;
}
else
{
throw std::runtime_error("Invalid input and output types.");
}
dist.l_2 = sqrt(dist.l_2);
return dist;
}
// Unroll arbitrary-dimension distance into specializations for 1-, 2-, 3-dimensions
template <typename Tallocator1,
typename Tallocator2,
typename Tint1,
typename Tint2,
typename Tint3>
inline VectorNorms distance(const std::vector<std::vector<char, Tallocator1>>& input,
const std::vector<std::vector<char, Tallocator2>>& output,
const std::vector<Tint1>& length,
const size_t nbatch,
const fft_precision precision,
const fft_array_type itype,
const std::vector<Tint2>& istride,
const size_t idist,
const fft_array_type otype,
const std::vector<Tint3>& ostride,
const size_t odist,
std::vector<std::pair<size_t, size_t>>& linf_failures,
const double linf_cutoff,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
switch(length.size())
{
case 1:
return distance(input,
output,
length[0],
nbatch,
precision,
itype,
istride[0],
idist,
otype,
ostride[0],
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
case 2:
return distance(input,
output,
std::make_tuple(length[0], length[1]),
nbatch,
precision,
itype,
std::make_tuple(istride[0], istride[1]),
idist,
otype,
std::make_tuple(ostride[0], ostride[1]),
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
case 3:
return distance(input,
output,
std::make_tuple(length[0], length[1], length[2]),
nbatch,
precision,
itype,
std::make_tuple(istride[0], istride[1], istride[2]),
idist,
otype,
std::make_tuple(ostride[0], ostride[1], ostride[2]),
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
default:
abort();
}
}
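// Illustrative sketch (not part of the original header): comparing a buffer
// against itself with the vector-based distance overload above; both norms come
// back as zero and no L-infinity failures are recorded.  Sizes are hypothetical.
inline void example_distance_self()
{
    const std::vector<size_t> length{16};
    const std::vector<size_t> stride{1};
    std::vector<std::vector<char>> buf(1);
    buf[0].resize(16 * sizeof(std::complex<float>));
    std::vector<std::pair<size_t, size_t>> failures;
    const auto diff = distance(buf,
                               buf,
                               length,
                               1,
                               fft_precision_single,
                               fft_array_type_complex_interleaved,
                               stride,
                               16,
                               fft_array_type_complex_interleaved,
                               stride,
                               16,
                               failures,
                               0.5,
                               {0},
                               {0});
    // diff.l_2 == 0.0 and diff.l_inf == 0.0 for identical buffers
    (void)diff;
}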
// Compute the L-infinity and L-2 norm of a buffer with strides istride and
// distance idist between batches. Data is std::complex.
template <typename Tcomplex, typename T1, typename T2>
inline VectorNorms norm_complex(const Tcomplex* input,
const T1& whole_length,
const size_t nbatch,
const T2& istride,
const size_t idist,
const std::vector<size_t>& offset)
{
double linf = 0.0;
double l2 = 0.0;
size_t idx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist)
{
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
double cur_linf = 0.0;
double cur_l2 = 0.0;
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const double rval = std::abs(input[idx + offset[0]].real());
cur_linf = std::max(rval, cur_linf);
cur_l2 += rval * rval;
const double ival = std::abs(input[idx + offset[0]].imag());
cur_linf = std::max(ival, cur_linf);
cur_l2 += ival * ival;
} while(increment_rowmajor(index, length));
linf = std::max(linf, cur_linf);
l2 += cur_l2;
}
}
return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 norm of a buffer with strides istride and
// distance idist between batches. Data is real-valued.
template <typename Tfloat, typename T1, typename T2>
inline VectorNorms norm_real(const Tfloat* input,
const T1& whole_length,
const size_t nbatch,
const T2& istride,
const size_t idist,
const std::vector<size_t>& offset)
{
double linf = 0.0;
double l2 = 0.0;
size_t idx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist)
{
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
double cur_linf = 0.0;
double cur_l2 = 0.0;
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const double val = std::abs(input[idx + offset[0]]);
cur_linf = std::max(val, cur_linf);
cur_l2 += val * val;
} while(increment_rowmajor(index, length));
linf = std::max(linf, cur_linf);
l2 += cur_l2;
}
}
return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 norm of a buffer with strides istride and
// distance idist between batches. Data format is given by precision and itype.
template <typename Tallocator1, typename T1, typename T2>
inline VectorNorms norm(const std::vector<std::vector<char, Tallocator1>>& input,
const T1& length,
const size_t nbatch,
const fft_precision precision,
const fft_array_type itype,
const T2& istride,
const size_t idist,
const std::vector<size_t>& offset)
{
VectorNorms norm;
switch(itype)
{
case fft_array_type_complex_interleaved:
case fft_array_type_hermitian_interleaved:
switch(precision)
{
case fft_precision_single:
norm = norm_complex(reinterpret_cast<const std::complex<float>*>(input[0].data()),
length,
nbatch,
istride,
idist,
offset);
break;
case fft_precision_double:
norm = norm_complex(reinterpret_cast<const std::complex<double>*>(input[0].data()),
length,
nbatch,
istride,
idist,
offset);
break;
}
norm.l_2 *= norm.l_2;
break;
case fft_array_type_real:
case fft_array_type_complex_planar:
case fft_array_type_hermitian_planar:
        for(size_t idx = 0; idx < input.size(); ++idx)
{
VectorNorms n;
switch(precision)
{
case fft_precision_single:
n = norm_real(reinterpret_cast<const float*>(input[idx].data()),
length,
nbatch,
istride,
idist,
offset);
break;
case fft_precision_double:
n = norm_real(reinterpret_cast<const double*>(input[idx].data()),
length,
nbatch,
istride,
idist,
offset);
break;
}
norm.l_inf = std::max(n.l_inf, norm.l_inf);
norm.l_2 += n.l_2 * n.l_2;
}
break;
default:
throw std::runtime_error("Invalid data type");
}
norm.l_2 = sqrt(norm.l_2);
return norm;
}
// Unroll arbitrary-dimension norm into specializations for 1-, 2-, 3-dimensions
template <typename Tallocator1, typename T1, typename T2>
inline VectorNorms norm(const std::vector<std::vector<char, Tallocator1>>& input,
const std::vector<T1>& length,
const size_t nbatch,
const fft_precision precision,
const fft_array_type type,
const std::vector<T2>& stride,
const size_t dist,
const std::vector<size_t>& offset)
{
switch(length.size())
{
case 1:
return norm(input, length[0], nbatch, precision, type, stride[0], dist, offset);
case 2:
return norm(input,
std::make_tuple(length[0], length[1]),
nbatch,
precision,
type,
std::make_tuple(stride[0], stride[1]),
dist,
offset);
case 3:
return norm(input,
std::make_tuple(length[0], length[1], length[2]),
nbatch,
precision,
type,
std::make_tuple(stride[0], stride[1], stride[2]),
dist,
offset);
default:
abort();
}
}
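// Illustrative sketch (not part of the original header): the L-2 norm of a
// real buffer holding {3, 4, 0, 0} is 5 and its L-infinity norm is 4.  The
// layout below is hypothetical (contiguous, single batch).
inline void example_norm_real()
{
    std::vector<std::vector<char>> buf(1);
    buf[0].resize(4 * sizeof(float));
    auto* data = reinterpret_cast<float*>(buf[0].data());
    data[0] = 3.0f;
    data[1] = 4.0f;
    data[2] = 0.0f;
    data[3] = 0.0f;
    const std::vector<size_t> length{4};
    const std::vector<size_t> stride{1};
    const auto n
        = norm(buf, length, 1, fft_precision_single, fft_array_type_real, stride, 4, {0});
    // n.l_2 == 5.0 and n.l_inf == 4.0
    (void)n;
}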
// Given a buffer of complex values stored in a vector of chars (or two vectors in the
// case of planar format), impose Hermitian symmetry.
// NB: length is the dimensions of the FFT, not the data layout dimensions.
template <typename Tfloat, typename Tallocator, typename Tsize>
inline void impose_hermitian_symmetry(std::vector<std::vector<char, Tallocator>>& vals,
const std::vector<Tsize>& length,
const std::vector<Tsize>& istride,
const Tsize idist,
const Tsize nbatch)
{
switch(vals.size())
{
case 1:
{
// Complex interleaved data
        for(size_t ibatch = 0; ibatch < nbatch; ++ibatch)
{
auto data = ((std::complex<Tfloat>*)vals[0].data()) + ibatch * idist;
switch(length.size())
{
case 3:
if(length[2] % 2 == 0)
{
data[istride[2] * (length[2] / 2)].imag(0.0);
}
if(length[0] % 2 == 0 && length[2] % 2 == 0)
{
data[istride[0] * (length[0] / 2) + istride[2] * (length[2] / 2)].imag(0.0);
}
if(length[1] % 2 == 0 && length[2] % 2 == 0)
{
data[istride[1] * (length[1] / 2) + istride[2] * (length[2] / 2)].imag(0.0);
}
if(length[0] % 2 == 0 && length[1] % 2 == 0 && length[2] % 2 == 0)
{
                    // clang format off
                    data[istride[0] * (length[0] / 2) + istride[1] * (length[1] / 2)
                         + istride[2] * (length[2] / 2)]
                        .imag(0.0);
                    // clang format on
}
// y-axis:
for(auto j = 1; j < (length[1] + 1) / 2; ++j)
{
data[istride[1] * (length[1] - j)] = std::conj(data[istride[1] * j]);
}
if(length[0] % 2 == 0)
{
// y-axis at x-nyquist
for(auto j = 1; j < (length[1] + 1) / 2; ++j)
{
// clang format off
data[istride[0] * (length[0] / 2) + istride[1] * (length[1] - j)]
= std::conj(data[istride[0] * (length[0] / 2) + istride[1] * j]);
// clang format on
}
}
// x-axis:
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
data[istride[0] * (length[0] - i)] = std::conj(data[istride[0] * i]);
}
if(length[1] % 2 == 0)
{
// x-axis at y-nyquist
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
// clang format off
data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)]
= std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)]);
// clang format on
}
}
// x-y plane:
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
for(auto j = 1; j < length[1]; ++j)
{
// clang format off
data[istride[0] * (length[0] - i) + istride[1] * (length[1] - j)]
= std::conj(data[istride[0] * i + istride[1] * j]);
// clang format on
}
}
if(length[2] % 2 == 0)
{
// x-axis at z-nyquist
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
data[istride[0] * (length[0] - i) + istride[2] * (length[2] / 2)]
= std::conj(data[istride[0] * i + istride[2] * (length[2] / 2)]);
}
                    if(length[1] % 2 == 0)
                    {
                        // x-axis at yz-nyquist (includes the y-nyquist plane,
                        // which the z-nyquist loop above omits)
                        for(auto i = 1; i < (length[0] + 1) / 2; ++i)
                        {
                            data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)
                                 + istride[2] * (length[2] / 2)]
                                = std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)
                                                 + istride[2] * (length[2] / 2)]);
                        }
                    }
// y-axis: at z-nyquist
for(auto j = 1; j < (length[1] + 1) / 2; ++j)
{
data[istride[1] * (length[1] - j) + istride[2] * (length[2] / 2)]
= std::conj(data[istride[1] * j + istride[2] * (length[2] / 2)]);
}
if(length[0] % 2 == 0)
{
// y-axis: at xz-nyquist
for(auto j = 1; j < (length[1] + 1) / 2; ++j)
{
// clang format off
data[istride[0] * (length[0] / 2) + istride[1] * (length[1] - j)
+ istride[2] * (length[2] / 2)]
= std::conj(data[istride[0] * (length[0] / 2) + istride[1] * j
+ istride[2] * (length[2] / 2)]);
// clang format on
}
}
// x-y plane: at z-nyquist
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
for(auto j = 1; j < length[1]; ++j)
{
// clang format off
data[istride[0] * (length[0] - i) + istride[1] * (length[1] - j)
+ istride[2] * (length[2] / 2)]
= std::conj(data[istride[0] * i + istride[1] * j
+ istride[2] * (length[2] / 2)]);
// clang format on
}
}
}
[[fallthrough]];
case 2:
if(length[1] % 2 == 0)
{
data[istride[1] * (length[1] / 2)].imag(0.0);
}
if(length[0] % 2 == 0 && length[1] % 2 == 0)
{
data[istride[0] * (length[0] / 2) + istride[1] * (length[1] / 2)].imag(0.0);
}
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
data[istride[0] * (length[0] - i)] = std::conj(data[istride[0] * i]);
}
if(length[1] % 2 == 0)
{
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)]
= std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)]);
}
}
[[fallthrough]];
case 1:
data[0].imag(0.0);
if(length[0] % 2 == 0)
{
data[istride[0] * (length[0] / 2)].imag(0.0);
}
break;
default:
throw std::runtime_error("Invalid dimension for imposeHermitianSymmetry");
}
}
break;
}
case 2:
{
// Complex planar data
        for(size_t ibatch = 0; ibatch < nbatch; ++ibatch)
{
auto idata = ((Tfloat*)vals[1].data()) + ibatch * idist;
switch(length.size())
{
case 3:
throw std::runtime_error("Not implemented");
// TODO: implement
case 2:
throw std::runtime_error("Not implemented");
// TODO: implement
case 1:
idata[0] = 0.0;
if(length[0] % 2 == 0)
{
idata[istride[0] * (length[0] / 2)] = 0.0;
}
break;
default:
throw std::runtime_error("Invalid dimension for imposeHermitianSymmetry");
}
}
break;
}
default:
throw std::runtime_error("Invalid data type");
}
}
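// Illustrative sketch (not part of the original header): imposing Hermitian
// symmetry on a 1D interleaved half-spectrum of logical length 8.  In 1D only
// the DC and Nyquist imaginary parts are touched.  Sizes are hypothetical.
inline void example_impose_hermitian_1d()
{
    const size_t N = 8; // logical FFT length; N/2 + 1 complex values are stored
    std::vector<std::vector<char>> buf(1);
    buf[0].resize((N / 2 + 1) * sizeof(std::complex<float>));
    const std::vector<size_t> length{N};
    const std::vector<size_t> stride{1};
    impose_hermitian_symmetry<float>(buf, length, stride, N / 2 + 1, size_t(1));
    // elements 0 (DC) and N/2 (Nyquist) now have zero imaginary parts
}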
// Given an array type and transform length, strides, etc, load random floats in [0,1]
// into the input array of floats/doubles or complex floats/doubles, which is stored in a
// vector of chars (or two vectors in the case of planar format).
// lengths are the memory lengths (i.e., not the transform parameters)
template <typename Tfloat, typename Tallocator, typename Tint1>
inline void set_input(std::vector<std::vector<char, Tallocator>>& input,
const fft_array_type itype,
const Tint1& whole_length,
const Tint1& istride,
const size_t idist,
const size_t nbatch)
{
switch(itype)
{
case fft_array_type_complex_interleaved:
case fft_array_type_hermitian_interleaved:
{
auto idata = (std::complex<Tfloat>*)input[0].data();
size_t i_base = 0;
auto partitions = partition_rowmajor(whole_length);
        for(size_t b = 0; b < nbatch; b++, i_base += idist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
std::mt19937 gen(compute_index(index, istride, i_base));
do
{
const auto i = compute_index(index, istride, i_base);
const Tfloat x = (Tfloat)gen() / (Tfloat)gen.max();
const Tfloat y = (Tfloat)gen() / (Tfloat)gen.max();
const std::complex<Tfloat> val(x, y);
idata[i] = val;
} while(increment_rowmajor(index, length));
}
}
break;
}
case fft_array_type_complex_planar:
case fft_array_type_hermitian_planar:
{
auto ireal = (Tfloat*)input[0].data();
auto iimag = (Tfloat*)input[1].data();
size_t i_base = 0;
auto partitions = partition_rowmajor(whole_length);
        for(size_t b = 0; b < nbatch; b++, i_base += idist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
std::mt19937 gen(compute_index(index, istride, i_base));
do
{
const auto i = compute_index(index, istride, i_base);
const std::complex<Tfloat> val((Tfloat)gen() / (Tfloat)gen.max(),
(Tfloat)gen() / (Tfloat)gen.max());
ireal[i] = val.real();
iimag[i] = val.imag();
} while(increment_rowmajor(index, length));
}
}
break;
}
case fft_array_type_real:
{
auto idata = (Tfloat*)input[0].data();
size_t i_base = 0;
auto partitions = partition_rowmajor(whole_length);
        for(size_t b = 0; b < nbatch; b++, i_base += idist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
std::mt19937 gen(compute_index(index, istride, i_base));
do
{
const auto i = compute_index(index, istride, i_base);
const Tfloat val = (Tfloat)gen() / (Tfloat)gen.max();
idata[i] = val;
} while(increment_rowmajor(index, length));
}
}
break;
}
default:
throw std::runtime_error("Input layout format not yet supported");
}
}
// unroll set_input for dimension 1, 2, 3
template <typename Tfloat, typename Tallocator>
inline void set_input(std::vector<std::vector<char, Tallocator>>& input,
const fft_array_type itype,
const std::vector<size_t>& length,
const std::vector<size_t>& istride,
const size_t idist,
const size_t nbatch)
{
switch(length.size())
{
case 1:
set_input<Tfloat>(input, itype, length[0], istride[0], idist, nbatch);
break;
case 2:
set_input<Tfloat>(input,
itype,
std::make_tuple(length[0], length[1]),
std::make_tuple(istride[0], istride[1]),
idist,
nbatch);
break;
case 3:
set_input<Tfloat>(input,
itype,
std::make_tuple(length[0], length[1], length[2]),
std::make_tuple(istride[0], istride[1], istride[2]),
idist,
nbatch);
break;
default:
abort();
}
}
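// Illustrative sketch (not part of the original header): fill a contiguous 1D
// single-precision real input of length 16.  The values are reproducible for a
// given layout, since each partition's generator is seeded from its starting
// index.  The sizes are hypothetical.
inline void example_set_input_real()
{
    std::vector<std::vector<char>> in(1);
    in[0].resize(16 * sizeof(float));
    const std::vector<size_t> length{16};
    const std::vector<size_t> stride{1};
    set_input<float>(in, fft_array_type_real, length, stride, 16, 1);
}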
// Given a precision and array type, and the number of elements required in each
// component buffer, allocate the required host buffer(s).
template <typename Allocator = std::allocator<char>>
inline std::vector<std::vector<char, Allocator>> allocate_host_buffer(
const fft_precision precision, const fft_array_type type, const std::vector<size_t>& size)
{
std::vector<std::vector<char, Allocator>> buffers(size.size());
    for(size_t i = 0; i < size.size(); ++i)
{
buffers[i].resize(size[i] * var_size<size_t>(precision, type));
}
return buffers;
}
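// Illustrative sketch (not part of the original header): a planar layout needs
// one element count per component buffer (real and imaginary), while an
// interleaved layout needs a single count.
inline std::vector<std::vector<char>> example_allocate_planar(size_t nelems)
{
    return allocate_host_buffer(fft_precision_single, fft_array_type_complex_planar,
                                {nelems, nelems});
}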
// Given a data type and dimensions, fill the buffer, imposing Hermitian symmetry if
// necessary.
// NB: length is the logical size of the FFT, and not necessarily the data dimensions
template <typename Allocator = std::allocator<char>>
inline void compute_input(const fft_params& params,
std::vector<std::vector<char, Allocator>>& input)
{
switch(params.precision)
{
case fft_precision_double:
set_input<double>(
input, params.itype, params.ilength(), params.istride, params.idist, params.nbatch);
break;
case fft_precision_single:
set_input<float>(
input, params.itype, params.ilength(), params.istride, params.idist, params.nbatch);
break;
}
if(params.itype == fft_array_type_hermitian_interleaved
|| params.itype == fft_array_type_hermitian_planar)
{
switch(params.precision)
{
case fft_precision_double:
impose_hermitian_symmetry<double>(
input, params.length, params.istride, params.idist, params.nbatch);
break;
case fft_precision_single:
impose_hermitian_symmetry<float>(
input, params.length, params.istride, params.idist, params.nbatch);
break;
}
}
}
#endif
|
csr.c | /*!
* \file
*
* \brief Various routines for dealing with CSR matrices
*
* \author George Karypis
* \version\verbatim $Id: csr.c 13437 2013-01-11 21:54:10Z karypis $ \endverbatim
*/
#include <GKlib.h>
#define OMPMINOPS 50000
/*************************************************************************/
/*! Allocate memory for a CSR matrix and initializes it
\returns the allocated matrix. The various fields are set to NULL.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Create()
{
gk_csr_t *mat;
mat = (gk_csr_t *)gk_malloc(sizeof(gk_csr_t), "gk_csr_Create: mat");
gk_csr_Init(mat);
return mat;
}
/*************************************************************************/
/*! Initializes the matrix
\param mat is the matrix to be initialized.
*/
/*************************************************************************/
void gk_csr_Init(gk_csr_t *mat)
{
memset(mat, 0, sizeof(gk_csr_t));
mat->nrows = mat->ncols = -1;
}
/*************************************************************************/
/*! Frees all the memory allocated for matrix.
\param mat is the matrix to be freed.
*/
/*************************************************************************/
void gk_csr_Free(gk_csr_t **mat)
{
if (*mat == NULL)
return;
gk_csr_FreeContents(*mat);
gk_free((void **)mat, LTERM);
}
/*************************************************************************/
/*! Frees only the memory allocated for the matrix's different fields and
sets them to NULL.
\param mat is the matrix whose contents will be freed.
*/
/*************************************************************************/
void gk_csr_FreeContents(gk_csr_t *mat)
{
gk_free((void *)&mat->rowptr, &mat->rowind, &mat->rowval, &mat->rowids,
&mat->colptr, &mat->colind, &mat->colval, &mat->colids,
&mat->rnorms, &mat->cnorms, &mat->rsums, &mat->csums,
&mat->rsizes, &mat->csizes, &mat->rvols, &mat->cvols,
&mat->rwgts, &mat->cwgts,
LTERM);
}
/*************************************************************************/
/*! Returns a copy of a matrix.
\param mat is the matrix to be duplicated.
\returns the newly created copy of the matrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Dup(gk_csr_t *mat)
{
gk_csr_t *nmat;
nmat = gk_csr_Create();
nmat->nrows = mat->nrows;
nmat->ncols = mat->ncols;
/* copy the row structure */
if (mat->rowptr)
nmat->rowptr = gk_zcopy(mat->nrows+1, mat->rowptr,
gk_zmalloc(mat->nrows+1, "gk_csr_Dup: rowptr"));
if (mat->rowids)
nmat->rowids = gk_icopy(mat->nrows, mat->rowids,
gk_imalloc(mat->nrows, "gk_csr_Dup: rowids"));
if (mat->rnorms)
nmat->rnorms = gk_fcopy(mat->nrows, mat->rnorms,
gk_fmalloc(mat->nrows, "gk_csr_Dup: rnorms"));
if (mat->rowind)
nmat->rowind = gk_icopy(mat->rowptr[mat->nrows], mat->rowind,
gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowind"));
if (mat->rowval)
nmat->rowval = gk_fcopy(mat->rowptr[mat->nrows], mat->rowval,
gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowval"));
/* copy the col structure */
if (mat->colptr)
nmat->colptr = gk_zcopy(mat->ncols+1, mat->colptr,
gk_zmalloc(mat->ncols+1, "gk_csr_Dup: colptr"));
if (mat->colids)
nmat->colids = gk_icopy(mat->ncols, mat->colids,
gk_imalloc(mat->ncols, "gk_csr_Dup: colids"));
if (mat->cnorms)
nmat->cnorms = gk_fcopy(mat->ncols, mat->cnorms,
gk_fmalloc(mat->ncols, "gk_csr_Dup: cnorms"));
if (mat->colind)
nmat->colind = gk_icopy(mat->colptr[mat->ncols], mat->colind,
gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colind"));
if (mat->colval)
nmat->colval = gk_fcopy(mat->colptr[mat->ncols], mat->colval,
gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colval"));
return nmat;
}
/*************************************************************************/
/*! Returns a submatrix containing a set of consecutive rows.
\param mat is the original matrix.
\param rstart is the starting row.
\param nrows is the number of rows from rstart to extract.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractSubmatrix(gk_csr_t *mat, int rstart, int nrows)
{
ssize_t i;
gk_csr_t *nmat;
if (rstart+nrows > mat->nrows)
return NULL;
nmat = gk_csr_Create();
nmat->nrows = nrows;
nmat->ncols = mat->ncols;
/* copy the row structure */
if (mat->rowptr)
nmat->rowptr = gk_zcopy(nrows+1, mat->rowptr+rstart,
gk_zmalloc(nrows+1, "gk_csr_ExtractSubmatrix: rowptr"));
for (i=nrows; i>=0; i--)
nmat->rowptr[i] -= nmat->rowptr[0];
ASSERT(nmat->rowptr[0] == 0);
if (mat->rowids)
nmat->rowids = gk_icopy(nrows, mat->rowids+rstart,
gk_imalloc(nrows, "gk_csr_ExtractSubmatrix: rowids"));
if (mat->rnorms)
nmat->rnorms = gk_fcopy(nrows, mat->rnorms+rstart,
gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rnorms"));
if (mat->rsums)
nmat->rsums = gk_fcopy(nrows, mat->rsums+rstart,
gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rsums"));
ASSERT(nmat->rowptr[nrows] == mat->rowptr[rstart+nrows]-mat->rowptr[rstart]);
if (mat->rowind)
nmat->rowind = gk_icopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
mat->rowind+mat->rowptr[rstart],
gk_imalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
"gk_csr_ExtractSubmatrix: rowind"));
if (mat->rowval)
nmat->rowval = gk_fcopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
mat->rowval+mat->rowptr[rstart],
gk_fmalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
"gk_csr_ExtractSubmatrix: rowval"));
return nmat;
}
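/* Illustrative sketch (not part of GKlib): extract rows 10..19 of a matrix.
   The caller owns (and must free) the returned submatrix. */
static void example_extract_submatrix(gk_csr_t *mat)
{
  gk_csr_t *sub = gk_csr_ExtractSubmatrix(mat, 10, 10);
  if (sub) { /* NULL is returned if the requested rows are out of range */
    ASSERT(sub->rowptr[0] == 0 && sub->nrows == 10);
    gk_csr_Free(&sub);
  }
}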
/*************************************************************************/
/*! Returns a submatrix containing a certain set of rows.
\param mat is the original matrix.
\param nrows is the number of rows to extract.
\param rind is the set of row numbers to extract.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractRows(gk_csr_t *mat, int nrows, int *rind)
{
ssize_t i, ii, j, nnz;
gk_csr_t *nmat;
nmat = gk_csr_Create();
nmat->nrows = nrows;
nmat->ncols = mat->ncols;
for (nnz=0, i=0; i<nrows; i++)
nnz += mat->rowptr[rind[i]+1]-mat->rowptr[rind[i]];
nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind");
nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval");
nmat->rowptr[0] = 0;
for (nnz=0, j=0, ii=0; ii<nrows; ii++) {
i = rind[ii];
gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
nnz += mat->rowptr[i+1]-mat->rowptr[i];
nmat->rowptr[++j] = nnz;
}
ASSERT(j == nmat->nrows);
return nmat;
}
/*************************************************************************/
/*! Returns a submatrix corresponding to a specified partitioning of rows.
\param mat is the original matrix.
\param part is the partitioning vector of the rows.
\param pid is the partition ID that will be extracted.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractPartition(gk_csr_t *mat, int *part, int pid)
{
ssize_t i, j, nnz;
gk_csr_t *nmat;
nmat = gk_csr_Create();
nmat->nrows = 0;
nmat->ncols = mat->ncols;
for (nnz=0, i=0; i<mat->nrows; i++) {
if (part[i] == pid) {
nmat->nrows++;
nnz += mat->rowptr[i+1]-mat->rowptr[i];
}
}
nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind");
nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval");
nmat->rowptr[0] = 0;
for (nnz=0, j=0, i=0; i<mat->nrows; i++) {
if (part[i] == pid) {
gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
nnz += mat->rowptr[i+1]-mat->rowptr[i];
nmat->rowptr[++j] = nnz;
}
}
ASSERT(j == nmat->nrows);
return nmat;
}
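/* Illustrative sketch (not part of GKlib): extract both sides of a 2-way row
   partition. For a strict 2-way partition vector the two row counts sum to
   mat->nrows. */
static void example_extract_partition(gk_csr_t *mat, int *part)
{
  gk_csr_t *p0 = gk_csr_ExtractPartition(mat, part, 0);
  gk_csr_t *p1 = gk_csr_ExtractPartition(mat, part, 1);
  ASSERT(p0->nrows + p1->nrows == mat->nrows);
  gk_csr_Free(&p0);
  gk_csr_Free(&p1);
}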
/*************************************************************************/
/*! Splits the matrix into multiple sub-matrices based on the provided
color array.
\param mat is the original matrix.
\param color is an array of size equal to the number of non-zeros
in the matrix (row-wise structure). The matrix is split into
           as many parts as the number of colors. For meaningful results,
the colors should be numbered consecutively starting from 0.
\returns an array of matrices for each supplied color number.
*/
/**************************************************************************/
gk_csr_t **gk_csr_Split(gk_csr_t *mat, int *color)
{
ssize_t i, j;
int nrows, ncolors;
ssize_t *rowptr;
int *rowind;
float *rowval;
gk_csr_t **smats;
nrows = mat->nrows;
rowptr = mat->rowptr;
rowind = mat->rowind;
rowval = mat->rowval;
ncolors = gk_imax(rowptr[nrows], color)+1;
smats = (gk_csr_t **)gk_malloc(sizeof(gk_csr_t *)*ncolors, "gk_csr_Split: smats");
for (i=0; i<ncolors; i++) {
smats[i] = gk_csr_Create();
smats[i]->nrows = mat->nrows;
smats[i]->ncols = mat->ncols;
smats[i]->rowptr = gk_zsmalloc(nrows+1, 0, "gk_csr_Split: smats[i]->rowptr");
}
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++)
smats[color[j]]->rowptr[i]++;
}
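  /* convert the per-color nonzero counts into CSR rowptr offsets (prefix sums) */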
for (i=0; i<ncolors; i++)
MAKECSR(j, nrows, smats[i]->rowptr);
for (i=0; i<ncolors; i++) {
smats[i]->rowind = gk_imalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowind");
smats[i]->rowval = gk_fmalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowval");
}
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
smats[color[j]]->rowind[smats[color[j]]->rowptr[i]] = rowind[j];
smats[color[j]]->rowval[smats[color[j]]->rowptr[i]] = rowval[j];
smats[color[j]]->rowptr[i]++;
}
}
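  /* the fill pass advanced each rowptr entry by one row; SHIFTCSR moves them back */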
for (i=0; i<ncolors; i++)
SHIFTCSR(j, nrows, smats[i]->rowptr);
return smats;
}
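/* Illustrative sketch (not part of GKlib): split a matrix into two
   sub-matrices by the sign of each nonzero. The color array has one entry per
   nonzero and must use consecutive colors starting from 0; this sketch assumes
   the matrix has at least one negative entry. */
static void example_split_by_sign(gk_csr_t *mat)
{
  ssize_t j, nnz = mat->rowptr[mat->nrows];
  int *color = gk_imalloc(nnz, "example_split_by_sign: color");
  gk_csr_t **smats;
  for (j=0; j<nnz; j++)
    color[j] = (mat->rowval[j] < 0.0 ? 1 : 0);
  smats = gk_csr_Split(mat, color);
  /* smats[0] holds the non-negative entries, smats[1] the negative ones */
  gk_csr_Free(&smats[0]);
  gk_csr_Free(&smats[1]);
  gk_free((void **)&smats, &color, LTERM);
}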
/**************************************************************************/
/*! Reads a CSR matrix from the supplied file and stores it in the matrix's
    row-based (forward) structure.
\param filename is the file that stores the data.
\param format is either GK_CSR_FMT_METIS, GK_CSR_FMT_CLUTO,
GK_CSR_FMT_CSR, GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL
specifying the type of the input format.
The GK_CSR_FMT_CSR does not contain a header
line, whereas the GK_CSR_FMT_BINROW is a binary format written
by gk_csr_Write() using the same format specifier.
\param readvals is either 1 or 0, indicating if the CSR file contains
values or it does not. It only applies when GK_CSR_FMT_CSR is
used.
    \param numbering is either 1 or 0, indicating if the numbering of the
           indices starts from 1 or 0, respectively. If they start from 1,
           they are automatically decremented during input so that they
           will start from 0. It only applies when GK_CSR_FMT_CSR is
           used.
\returns the matrix that was read.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Read(char *filename, int format, int readvals, int numbering)
{
ssize_t i, k, l;
size_t nfields, nrows, ncols, nnz, fmt, ncon;
size_t lnlen;
ssize_t *rowptr;
int *rowind, ival;
float *rowval=NULL, fval;
int readsizes, readwgts;
char *line=NULL, *head, *tail, fmtstr[256];
FILE *fpin;
gk_csr_t *mat=NULL;
if (!gk_fexists(filename))
gk_errexit(SIGERR, "File %s does not exist!\n", filename);
if (format == GK_CSR_FMT_BINROW) {
mat = gk_csr_Create();
fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
mat->rowptr = gk_zmalloc(mat->nrows+1, "gk_csr_Read: rowptr");
if ((ssize_t)fread(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpin) != mat->nrows+1)
gk_errexit(SIGERR, "Failed to read the rowptr from file %s!\n", filename);
mat->rowind = gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowind");
if ((ssize_t)fread(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
gk_errexit(SIGERR, "Failed to read the rowind from file %s!\n", filename);
if (readvals == 1) {
mat->rowval = gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowval");
if ((ssize_t)fread(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
gk_errexit(SIGERR, "Failed to read the rowval from file %s!\n", filename);
}
gk_fclose(fpin);
return mat;
}
if (format == GK_CSR_FMT_BINCOL) {
mat = gk_csr_Create();
fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
mat->colptr = gk_zmalloc(mat->ncols+1, "gk_csr_Read: colptr");
if ((ssize_t)fread(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpin) != mat->ncols+1)
gk_errexit(SIGERR, "Failed to read the colptr from file %s!\n", filename);
mat->colind = gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Read: colind");
if ((ssize_t)fread(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
gk_errexit(SIGERR, "Failed to read the colind from file %s!\n", filename);
if (readvals) {
mat->colval = gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Read: colval");
if ((ssize_t)fread(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
gk_errexit(SIGERR, "Failed to read the colval from file %s!\n", filename);
}
gk_fclose(fpin);
return mat;
}
if (format == GK_CSR_FMT_CLUTO) {
fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
do {
if (gk_getline(&line, &lnlen, fpin) <= 0)
gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
} while (line[0] == '%');
if (sscanf(line, "%zu %zu %zu", &nrows, &ncols, &nnz) != 3)
gk_errexit(SIGERR, "Header line must contain 3 integers.\n");
readsizes = 0;
readwgts = 0;
readvals = 1;
numbering = 1;
}
else if (format == GK_CSR_FMT_METIS) {
fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
do {
if (gk_getline(&line, &lnlen, fpin) <= 0)
gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
} while (line[0] == '%');
fmt = ncon = 0;
nfields = sscanf(line, "%zu %zu %zu %zu", &nrows, &nnz, &fmt, &ncon);
if (nfields < 2)
gk_errexit(SIGERR, "Header line must contain at least 2 integers (#vtxs and #edges).\n");
ncols = nrows;
nnz *= 2;
if (fmt > 111)
gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt);
sprintf(fmtstr, "%03zu", fmt%1000);
readsizes = (fmtstr[0] == '1');
readwgts = (fmtstr[1] == '1');
readvals = (fmtstr[2] == '1');
numbering = 1;
ncon = (ncon == 0 ? 1 : ncon);
}
else {
readsizes = 0;
readwgts = 0;
gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);
if (readvals == 1 && nnz%2 == 1)
gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not even.\n", nnz, readvals);
if (readvals == 1)
nnz = nnz/2;
fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
}
mat = gk_csr_Create();
mat->nrows = nrows;
rowptr = mat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Read: rowptr");
rowind = mat->rowind = gk_imalloc(nnz, "gk_csr_Read: rowind");
if (readvals != 2)
rowval = mat->rowval = gk_fsmalloc(nnz, 1.0, "gk_csr_Read: rowval");
if (readsizes)
mat->rsizes = gk_fsmalloc(nrows, 0.0, "gk_csr_Read: rsizes");
if (readwgts)
mat->rwgts = gk_fsmalloc(nrows*ncon, 0.0, "gk_csr_Read: rwgts");
/*----------------------------------------------------------------------
* Read the sparse matrix file
*---------------------------------------------------------------------*/
  numbering = (numbering ? -1 : 0);
for (ncols=0, rowptr[0]=0, k=0, i=0; i < (ssize_t)nrows; i++) {
do {
if (gk_getline(&line, &lnlen, fpin) == -1)
gk_errexit(SIGERR, "Premature end of input file: file while reading row %d\n", i);
} while (line[0] == '%');
head = line;
tail = NULL;
/* Read vertex sizes */
if (readsizes) {
#ifdef __MSC__
mat->rsizes[i] = (float)strtod(head, &tail);
#else
mat->rsizes[i] = strtof(head, &tail);
#endif
if (tail == head)
gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1);
if (mat->rsizes[i] < 0)
errexit("The size for vertex %zd must be >= 0\n", i+1);
head = tail;
}
/* Read vertex weights */
if (readwgts) {
for (l=0; l < (ssize_t)ncon; l++) {
#ifdef __MSC__
mat->rwgts[i*ncon+l] = (float)strtod(head, &tail);
#else
mat->rwgts[i*ncon+l] = strtof(head, &tail);
#endif
if (tail == head)
errexit("The line for vertex %zd does not have enough weights "
"for the %d constraints.\n", i+1, ncon);
if (mat->rwgts[i*ncon+l] < 0)
errexit("The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l);
head = tail;
}
}
/* Read the rest of the row */
while (1) {
ival = (int)strtol(head, &tail, 0);
if (tail == head)
break;
head = tail;
if ((rowind[k] = ival + numbering) < 0)
gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", ival, i);
ncols = gk_max((ssize_t)rowind[k], (ssize_t)ncols);
if (readvals == 1) {
#ifdef __MSC__
fval = (float)strtod(head, &tail);
#else
fval = strtof(head, &tail);
#endif
if (tail == head)
gk_errexit(SIGERR, "Value could not be found for column! Row:%zd, NNZ:%zd\n", i, k);
head = tail;
rowval[k] = fval;
}
k++;
}
rowptr[i+1] = k;
}
if (format == GK_CSR_FMT_METIS) {
ASSERT(ncols+1 == mat->nrows);
mat->ncols = mat->nrows;
}
else {
mat->ncols = ncols+1;
}
if (k != (ssize_t)nnz)
gk_errexit(SIGERR, "gk_csr_Read: Something wrong with the number of nonzeros in "
"the input file. NNZ=%zd, ActualNNZ=%zd.\n", nnz, k);
gk_fclose(fpin);
gk_free((void **)&line, LTERM);
return mat;
}
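/* Illustrative sketch (not part of GKlib): read a 0-based, headerless CSR
   file that includes values, report its size, and free it. The filename is
   hypothetical. */
static void example_read_csr(void)
{
  gk_csr_t *mat = gk_csr_Read("graph.csr", GK_CSR_FMT_CSR, 1, 0);
  printf("Read a %d x %d matrix with %zd nonzeros.\n",
         mat->nrows, mat->ncols, mat->rowptr[mat->nrows]);
  gk_csr_Free(&mat);
}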
/**************************************************************************/
/*! Writes the row-based structure of a matrix into a file.
\param mat is the matrix to be written,
\param filename is the name of the output file.
\param format is one of: GK_CSR_FMT_CLUTO, GK_CSR_FMT_CSR,
GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL.
\param writevals is either 1 or 0 indicating if the values will be
written or not. This is only applicable when GK_CSR_FMT_CSR
is used.
\param numbering is either 1 or 0 indicating if the internal 0-based
numbering will be shifted by one or not during output. This
is only applicable when GK_CSR_FMT_CSR is used.
*/
/**************************************************************************/
void gk_csr_Write(gk_csr_t *mat, char *filename, int format, int writevals, int numbering)
{
ssize_t i, j;
FILE *fpout;
if (format == GK_CSR_FMT_BINROW) {
if (filename == NULL)
gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");
fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
fwrite(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpout);
fwrite(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpout);
if (writevals)
fwrite(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpout);
gk_fclose(fpout);
return;
}
if (format == GK_CSR_FMT_BINCOL) {
if (filename == NULL)
gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");
fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
fwrite(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpout);
fwrite(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpout);
if (writevals)
fwrite(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpout);
gk_fclose(fpout);
return;
}
if (filename)
fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");
else
fpout = stdout;
if (format == GK_CSR_FMT_CLUTO) {
fprintf(fpout, "%d %d %zd\n", mat->nrows, mat->ncols, mat->rowptr[mat->nrows]);
writevals = 1;
numbering = 1;
}
for (i=0; i<mat->nrows; i++) {
for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
fprintf(fpout, " %d", mat->rowind[j]+(numbering ? 1 : 0));
if (writevals)
fprintf(fpout, " %f", mat->rowval[j]);
}
fprintf(fpout, "\n");
}
if (filename)
gk_fclose(fpout);
}
/*************************************************************************/
/*! Prunes certain rows/columns of the matrix. The pruning is determined by
    analyzing the row structure of the matrix. Pruning removes rows/columns
    but does not affect the numbering of the remaining rows/columns.
    \param mat the matrix to be pruned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
           of the matrix will be pruned,
    \param minf is the minimum number of rows (columns) in which a column (row)
           must appear in order to be kept,
    \param maxf is the maximum number of rows (columns) in which a column (row)
           may appear in order to be kept.
    \returns the pruned matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Prune(gk_csr_t *mat, int what, int minf, int maxf)
{
ssize_t i, j, nnz;
int nrows, ncols;
ssize_t *rowptr, *nrowptr;
int *rowind, *nrowind, *collen;
float *rowval, *nrowval;
gk_csr_t *nmat;
nmat = gk_csr_Create();
nrows = nmat->nrows = mat->nrows;
ncols = nmat->ncols = mat->ncols;
rowptr = mat->rowptr;
rowind = mat->rowind;
rowval = mat->rowval;
nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Prune: nrowptr");
nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_Prune: nrowind");
nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_Prune: nrowval");
switch (what) {
case GK_CSR_COL:
collen = gk_ismalloc(ncols, 0, "gk_csr_Prune: collen");
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
ASSERT(rowind[j] < ncols);
collen[rowind[j]]++;
}
}
for (i=0; i<ncols; i++)
collen[i] = (collen[i] >= minf && collen[i] <= maxf ? 1 : 0);
nrowptr[0] = 0;
for (nnz=0, i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (collen[rowind[j]]) {
nrowind[nnz] = rowind[j];
nrowval[nnz] = rowval[j];
nnz++;
}
}
nrowptr[i+1] = nnz;
}
gk_free((void **)&collen, LTERM);
break;
case GK_CSR_ROW:
nrowptr[0] = 0;
for (nnz=0, i=0; i<nrows; i++) {
if (rowptr[i+1]-rowptr[i] >= minf && rowptr[i+1]-rowptr[i] <= maxf) {
for (j=rowptr[i]; j<rowptr[i+1]; j++, nnz++) {
nrowind[nnz] = rowind[j];
nrowval[nnz] = rowval[j];
}
}
nrowptr[i+1] = nnz;
}
break;
default:
gk_csr_Free(&nmat);
gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
return NULL;
}
return nmat;
}
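/* Illustrative sketch (not part of GKlib): keep only the columns that appear
   in at least 2 and at most 100 rows. The thresholds are hypothetical. */
static void example_prune_rare_columns(gk_csr_t *mat)
{
  gk_csr_t *pruned = gk_csr_Prune(mat, GK_CSR_COL, 2, 100);
  /* ... use the pruned matrix ... */
  gk_csr_Free(&pruned);
}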
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the highest weight entries whose
sum accounts for a certain fraction of the overall weight of the
row/column.
    \param mat the matrix to be filtered,
    \param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
           of the matrix will be filtered,
\param norm indicates the norm that will be used to aggregate the weights
and possible values are 1 or 2,
\param fraction is the fraction of the overall norm that will be retained
by the kept entries.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_LowFilter(gk_csr_t *mat, int what, int norm, float fraction)
{
ssize_t i, j, nnz;
int nrows, ncols, ncand, maxlen=0;
ssize_t *rowptr, *colptr, *nrowptr;
int *rowind, *colind, *nrowind;
float *rowval, *colval, *nrowval, rsum, tsum;
gk_csr_t *nmat;
gk_fkv_t *cand;
nmat = gk_csr_Create();
nrows = nmat->nrows = mat->nrows;
ncols = nmat->ncols = mat->ncols;
rowptr = mat->rowptr;
rowind = mat->rowind;
rowval = mat->rowval;
colptr = mat->colptr;
colind = mat->colind;
colval = mat->colval;
nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");
switch (what) {
case GK_CSR_COL:
if (mat->colptr == NULL)
gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");
gk_zcopy(nrows+1, rowptr, nrowptr);
for (i=0; i<ncols; i++)
maxlen = gk_max(maxlen, colptr[i+1]-colptr[i]);
#pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
{
cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");
#pragma omp for schedule(static)
for (i=0; i<ncols; i++) {
for (tsum=0.0, ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
cand[ncand].val = colind[j];
cand[ncand].key = colval[j];
tsum += (norm == 1 ? colval[j] : colval[j]*colval[j]);
}
gk_fkvsortd(ncand, cand);
for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
nrowind[nrowptr[cand[j].val]] = i;
nrowval[nrowptr[cand[j].val]] = cand[j].key;
nrowptr[cand[j].val]++;
}
}
gk_free((void **)&cand, LTERM);
}
/* compact the nrowind/nrowval */
for (nnz=0, i=0; i<nrows; i++) {
for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
nrowind[nnz] = nrowind[j];
nrowval[nnz] = nrowval[j];
}
nrowptr[i] = nnz;
}
SHIFTCSR(i, nrows, nrowptr);
break;
case GK_CSR_ROW:
if (mat->rowptr == NULL)
gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");
for (i=0; i<nrows; i++)
maxlen = gk_max(maxlen, rowptr[i+1]-rowptr[i]);
#pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
{
cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");
#pragma omp for schedule(static)
for (i=0; i<nrows; i++) {
for (tsum=0.0, ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
cand[ncand].val = rowind[j];
cand[ncand].key = rowval[j];
tsum += (norm == 1 ? rowval[j] : rowval[j]*rowval[j]);
}
gk_fkvsortd(ncand, cand);
for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
nrowind[rowptr[i]+j] = cand[j].val;
nrowval[rowptr[i]+j] = cand[j].key;
}
nrowptr[i+1] = rowptr[i]+j;
}
gk_free((void **)&cand, LTERM);
}
/* compact nrowind/nrowval */
nrowptr[0] = nnz = 0;
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<nrowptr[i+1]; j++, nnz++) {
nrowind[nnz] = nrowind[j];
nrowval[nnz] = nrowval[j];
}
nrowptr[i+1] = nnz;
}
break;
default:
gk_csr_Free(&nmat);
gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
return NULL;
}
return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the highest weight top-K entries
along each row/column and those entries whose weight is greater than
a specified value.
\param mat the matrix to be pruned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be pruned,
\param topk is the number of the highest weight entries to keep.
\param keepval is the weight above which an entry will be kept. This
is used to select additional entries past the first topk.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
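
Example (a minimal sketch; mat is assumed to be an already-populated,
row-based matrix): keep the 10 heaviest entries of each row, plus any
additional entries whose weight is at least 0.5:

gk_csr_t *fmat;
fmat = gk_csr_TopKPlusFilter(mat, GK_CSR_ROW, 10, 0.5);
gk_csr_Free(&fmat);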
*/
/**************************************************************************/
gk_csr_t *gk_csr_TopKPlusFilter(gk_csr_t *mat, int what, int topk, float keepval)
{
ssize_t i, j, k, nnz;
int nrows, ncols, ncand;
ssize_t *rowptr, *colptr, *nrowptr;
int *rowind, *colind, *nrowind;
float *rowval, *colval, *nrowval;
gk_csr_t *nmat;
gk_fkv_t *cand;
nmat = gk_csr_Create();
nrows = nmat->nrows = mat->nrows;
ncols = nmat->ncols = mat->ncols;
rowptr = mat->rowptr;
rowind = mat->rowind;
rowval = mat->rowval;
colptr = mat->colptr;
colind = mat->colind;
colval = mat->colval;
nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");
switch (what) {
case GK_CSR_COL:
if (mat->colptr == NULL)
gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");
cand = gk_fkvmalloc(nrows, "gk_csr_LowFilter: cand");
gk_zcopy(nrows+1, rowptr, nrowptr);
for (i=0; i<ncols; i++) {
for (ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
cand[ncand].val = colind[j];
cand[ncand].key = colval[j];
}
gk_fkvsortd(ncand, cand);
k = gk_min(topk, ncand);
for (j=0; j<k; j++) {
nrowind[nrowptr[cand[j].val]] = i;
nrowval[nrowptr[cand[j].val]] = cand[j].key;
nrowptr[cand[j].val]++;
}
for (; j<ncand; j++) {
if (cand[j].key < keepval)
break;
nrowind[nrowptr[cand[j].val]] = i;
nrowval[nrowptr[cand[j].val]] = cand[j].key;
nrowptr[cand[j].val]++;
}
}
/* compact the nrowind/nrowval */
for (nnz=0, i=0; i<nrows; i++) {
for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
nrowind[nnz] = nrowind[j];
nrowval[nnz] = nrowval[j];
}
nrowptr[i] = nnz;
}
SHIFTCSR(i, nrows, nrowptr);
gk_free((void **)&cand, LTERM);
break;
case GK_CSR_ROW:
if (mat->rowptr == NULL)
gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");
cand = gk_fkvmalloc(ncols, "gk_csr_LowFilter: cand");
nrowptr[0] = 0;
for (nnz=0, i=0; i<nrows; i++) {
for (ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
cand[ncand].val = rowind[j];
cand[ncand].key = rowval[j];
}
gk_fkvsortd(ncand, cand);
k = gk_min(topk, ncand);
for (j=0; j<k; j++, nnz++) {
nrowind[nnz] = cand[j].val;
nrowval[nnz] = cand[j].key;
}
for (; j<ncand; j++, nnz++) {
if (cand[j].key < keepval)
break;
nrowind[nnz] = cand[j].val;
nrowval[nnz] = cand[j].key;
}
nrowptr[i+1] = nnz;
}
gk_free((void **)&cand, LTERM);
break;
default:
gk_csr_Free(&nmat);
gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
return NULL;
}
return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the terms whose contribution to
the total length of the document is greater than a user-supplied multiple
of the average.
This routine assumes that the vectors are normalized to be unit length.
\param mat the matrix to be pruned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be pruned,
\param zscore is the multiplicative factor over the average contribution
to the length of the document.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
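
Example (a minimal sketch; mat is assumed to hold unit-length rows,
e.g., after gk_csr_Normalize(mat, GK_CSR_ROW, 2)): keep the entries
whose weight exceeds twice the average per-entry contribution:

gk_csr_t *fmat;
fmat = gk_csr_ZScoreFilter(mat, GK_CSR_ROW, 2.0);
gk_csr_Free(&fmat);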
*/
/**************************************************************************/
gk_csr_t *gk_csr_ZScoreFilter(gk_csr_t *mat, int what, float zscore)
{
ssize_t i, j, nnz;
int nrows;
ssize_t *rowptr, *nrowptr;
int *rowind, *nrowind;
float *rowval, *nrowval, avgwgt;
gk_csr_t *nmat;
nmat = gk_csr_Create();
nmat->nrows = mat->nrows;
nmat->ncols = mat->ncols;
nrows = mat->nrows;
rowptr = mat->rowptr;
rowind = mat->rowind;
rowval = mat->rowval;
nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_ZScoreFilter: nrowptr");
nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowind");
nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowval");
switch (what) {
case GK_CSR_COL:
gk_errexit(SIGERR, "This has not been implemented yet.\n");
break;
case GK_CSR_ROW:
if (mat->rowptr == NULL)
gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");
nrowptr[0] = 0;
for (nnz=0, i=0; i<nrows; i++) {
avgwgt = zscore/(rowptr[i+1]-rowptr[i]);
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] > avgwgt) {
nrowind[nnz] = rowind[j];
nrowval[nnz] = rowval[j];
nnz++;
}
}
nrowptr[i+1] = nnz;
}
break;
default:
gk_csr_Free(&nmat);
gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
return NULL;
}
return nmat;
}
/*************************************************************************/
/*! Compacts the column-space of the matrix by removing empty columns.
As a result of the compaction, the column numbers are renumbered.
The compaction operation is done in place and only affects the row-based
representation of the matrix.
The new columns are ordered in decreasing frequency.
\param mat the matrix whose empty columns will be removed.
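
Example (a minimal sketch; mat is assumed to be row-based; on return,
mat->ncols reflects only the non-empty, renumbered columns):

gk_csr_CompactColumns(mat);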
*/
/**************************************************************************/
void gk_csr_CompactColumns(gk_csr_t *mat)
{
ssize_t i;
int nrows, ncols, nncols;
ssize_t *rowptr;
int *rowind, *colmap;
gk_ikv_t *clens;
nrows = mat->nrows;
ncols = mat->ncols;
rowptr = mat->rowptr;
rowind = mat->rowind;
colmap = gk_imalloc(ncols, "gk_csr_CompactColumns: colmap");
clens = gk_ikvmalloc(ncols, "gk_csr_CompactColumns: clens");
for (i=0; i<ncols; i++) {
clens[i].key = 0;
clens[i].val = i;
}
for (i=0; i<rowptr[nrows]; i++)
clens[rowind[i]].key++;
gk_ikvsortd(ncols, clens);
for (nncols=0, i=0; i<ncols; i++) {
if (clens[i].key > 0)
colmap[clens[i].val] = nncols++;
else
break;
}
for (i=0; i<rowptr[nrows]; i++)
rowind[i] = colmap[rowind[i]];
mat->ncols = nncols;
gk_free((void **)&colmap, &clens, LTERM);
}
/*************************************************************************/
/*! Sorts the indices in increasing order
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which set of
indices to sort.
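
Example (a minimal sketch): sort the column indices within each row,
as expected by routines such as gk_csr_ComputeSimilarity():

gk_csr_SortIndices(mat, GK_CSR_ROW);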
*/
/**************************************************************************/
void gk_csr_SortIndices(gk_csr_t *mat, int what)
{
int n, nn=0;
ssize_t *ptr;
int *ind;
float *val;
switch (what) {
case GK_CSR_ROW:
if (!mat->rowptr)
gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
n = mat->nrows;
ptr = mat->rowptr;
ind = mat->rowind;
val = mat->rowval;
break;
case GK_CSR_COL:
if (!mat->colptr)
gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
n = mat->ncols;
ptr = mat->colptr;
ind = mat->colind;
val = mat->colval;
break;
default:
gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
return;
}
#pragma omp parallel if (n > 100)
{
ssize_t i, j, k;
gk_ikv_t *cand;
float *tval;
#pragma omp single
for (i=0; i<n; i++)
nn = gk_max(nn, ptr[i+1]-ptr[i]);
cand = gk_ikvmalloc(nn, "gk_csr_SortIndices: cand");
tval = gk_fmalloc(nn, "gk_csr_SortIndices: tval");
#pragma omp for schedule(static)
for (i=0; i<n; i++) {
for (k=0, j=ptr[i]; j<ptr[i+1]; j++) {
if (j > ptr[i] && ind[j] < ind[j-1])
k = 1; /* an inversion */
cand[j-ptr[i]].val = j-ptr[i];
cand[j-ptr[i]].key = ind[j];
tval[j-ptr[i]] = val[j];
}
if (k) {
gk_ikvsorti(ptr[i+1]-ptr[i], cand);
for (j=ptr[i]; j<ptr[i+1]; j++) {
ind[j] = cand[j-ptr[i]].key;
val[j] = tval[cand[j-ptr[i]].val];
}
}
}
gk_free((void **)&cand, &tval, LTERM);
}
}
/*************************************************************************/
/*! Creates a row/column index from the column/row data.
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which index
will be created.
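
Example (a minimal sketch): build the column-based view of a matrix
that currently has only its row-based view:

gk_csr_CreateIndex(mat, GK_CSR_COL);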
*/
/**************************************************************************/
void gk_csr_CreateIndex(gk_csr_t *mat, int what)
{
/* 'f' stands for forward, 'r' stands for reverse */
ssize_t i, j, k, nf, nr;
ssize_t *fptr, *rptr;
int *find, *rind;
float *fval, *rval;
switch (what) {
case GK_CSR_COL:
nf = mat->nrows;
fptr = mat->rowptr;
find = mat->rowind;
fval = mat->rowval;
if (mat->colptr) gk_free((void **)&mat->colptr, LTERM);
if (mat->colind) gk_free((void **)&mat->colind, LTERM);
if (mat->colval) gk_free((void **)&mat->colval, LTERM);
nr = mat->ncols;
rptr = mat->colptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
rind = mat->colind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
rval = mat->colval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
break;
case GK_CSR_ROW:
nf = mat->ncols;
fptr = mat->colptr;
find = mat->colind;
fval = mat->colval;
if (mat->rowptr) gk_free((void **)&mat->rowptr, LTERM);
if (mat->rowind) gk_free((void **)&mat->rowind, LTERM);
if (mat->rowval) gk_free((void **)&mat->rowval, LTERM);
nr = mat->nrows;
rptr = mat->rowptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
rind = mat->rowind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
rval = mat->rowval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
break;
default:
gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
return;
}
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++)
rptr[find[j]]++;
}
MAKECSR(i, nr, rptr);
if (rptr[nr] > 6*nr) {
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++)
rind[rptr[find[j]]++] = i;
}
SHIFTCSR(i, nr, rptr);
if (fval) {
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++)
rval[rptr[find[j]]++] = fval[j];
}
SHIFTCSR(i, nr, rptr);
}
}
else {
if (fval) {
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++) {
k = find[j];
rind[rptr[k]] = i;
rval[rptr[k]++] = fval[j];
}
}
}
else {
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++)
rind[rptr[find[j]]++] = i;
}
}
SHIFTCSR(i, nr, rptr);
}
}
/*************************************************************************/
/*! Normalizes the rows/columns of the matrix to be unit
length.
\param mat the matrix itself,
\param what indicates what will be normalized and is obtained by
specifying GK_CSR_ROW, GK_CSR_COL, GK_CSR_ROW|GK_CSR_COL.
\param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm
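
Example (a minimal sketch): scale each row to unit 2-norm:

gk_csr_Normalize(mat, GK_CSR_ROW, 2);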
*/
/**************************************************************************/
void gk_csr_Normalize(gk_csr_t *mat, int what, int norm)
{
ssize_t i, j;
int n;
ssize_t *ptr;
float *val, sum;
if (what&GK_CSR_ROW && mat->rowval) {
n = mat->nrows;
ptr = mat->rowptr;
val = mat->rowval;
#pragma omp parallel if (ptr[n] > OMPMINOPS)
{
#pragma omp for private(j,sum) schedule(static)
for (i=0; i<n; i++) {
for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++){
if (norm == 2)
sum += val[j]*val[j];
else if (norm == 1)
sum += val[j]; /* assume val[j] > 0 */
}
if (sum > 0) {
if (norm == 2)
sum=1.0/sqrt(sum);
else if (norm == 1)
sum=1.0/sum;
for (j=ptr[i]; j<ptr[i+1]; j++)
val[j] *= sum;
}
}
}
}
if (what&GK_CSR_COL && mat->colval) {
n = mat->ncols;
ptr = mat->colptr;
val = mat->colval;
#pragma omp parallel if (ptr[n] > OMPMINOPS)
{
#pragma omp for private(j,sum) schedule(static)
for (i=0; i<n; i++) {
for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++)
if (norm == 2)
sum += val[j]*val[j];
else if (norm == 1)
sum += val[j];
if (sum > 0) {
if (norm == 2)
sum=1.0/sqrt(sum);
else if (norm == 1)
sum=1.0/sum;
for (j=ptr[i]; j<ptr[i+1]; j++)
val[j] *= sum;
}
}
}
}
}
/*************************************************************************/
/*! Applies different row scaling methods.
\param mat the matrix itself,
\param type indicates the type of row scaling. Possible values are:
GK_CSR_MAXTF, GK_CSR_MAXTF2, GK_CSR_SQRT, GK_CSR_POW25, GK_CSR_POW65,
GK_CSR_POW75, GK_CSR_POW85, GK_CSR_LOG, GK_CSR_IDF, GK_CSR_IDF2.
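
Example (a minimal sketch): apply TF-IDF weighting and then normalize
each row to unit length:

gk_csr_Scale(mat, GK_CSR_IDF);
gk_csr_Normalize(mat, GK_CSR_ROW, 2);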
*/
/**************************************************************************/
void gk_csr_Scale(gk_csr_t *mat, int type)
{
ssize_t i, j;
int nrows, ncols, nnzcols, bgfreq;
ssize_t *rowptr;
int *rowind, *collen;
float *rowval, *cscale, maxtf;
nrows = mat->nrows;
rowptr = mat->rowptr;
rowind = mat->rowind;
rowval = mat->rowval;
switch (type) {
case GK_CSR_MAXTF: /* TF' = .5 + .5*TF/MAX(TF) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j, maxtf) schedule(static)
for (i=0; i<nrows; i++) {
maxtf = fabs(rowval[rowptr[i]]);
for (j=rowptr[i]; j<rowptr[i+1]; j++)
maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);
for (j=rowptr[i]; j<rowptr[i+1]; j++)
rowval[j] = .5 + .5*rowval[j]/maxtf;
}
}
break;
case GK_CSR_MAXTF2: /* TF' = .1 + .9*TF/MAX(TF) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j, maxtf) schedule(static)
for (i=0; i<nrows; i++) {
maxtf = fabs(rowval[rowptr[i]]);
for (j=rowptr[i]; j<rowptr[i+1]; j++)
maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);
for (j=rowptr[i]; j<rowptr[i+1]; j++)
rowval[j] = .1 + .9*rowval[j]/maxtf;
}
}
break;
case GK_CSR_SQRT: /* TF' = .1+SQRT(TF) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] != 0.0)
rowval[j] = .1+sign(rowval[j], sqrt(fabs(rowval[j])));
}
}
}
break;
case GK_CSR_POW25: /* TF' = .1+POW(TF,.25) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] != 0.0)
rowval[j] = .1+sign(rowval[j], sqrt(sqrt(fabs(rowval[j]))));
}
}
}
break;
case GK_CSR_POW65: /* TF' = .1+POW(TF,.65) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] != 0.0)
rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .65));
}
}
}
break;
case GK_CSR_POW75: /* TF' = .1+POW(TF,.75) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] != 0.0)
rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .75));
}
}
}
break;
case GK_CSR_POW85: /* TF' = .1+POW(TF,.85) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] != 0.0)
rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .85));
}
}
}
break;
case GK_CSR_LOG: /* TF' = 1+log_2(TF) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
double logscale = 1.0/log(2.0);
#pragma omp for schedule(static,32)
for (i=0; i<rowptr[nrows]; i++) {
if (rowval[i] != 0.0)
rowval[i] = 1+(rowval[i]>0.0 ? log(rowval[i]) : -log(-rowval[i]))*logscale;
}
#ifdef XXX
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] != 0.0)
rowval[j] = 1+(rowval[j]>0.0 ? log(rowval[j]) : -log(-rowval[j]))*logscale;
//rowval[j] = 1+sign(rowval[j], log(fabs(rowval[j]))*logscale);
}
}
#endif
}
break;
case GK_CSR_IDF: /* TF' = TF*IDF */
ncols = mat->ncols;
cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++)
collen[rowind[j]]++;
}
#pragma omp parallel if (ncols > OMPMINOPS)
{
#pragma omp for schedule(static)
for (i=0; i<ncols; i++)
cscale[i] = (collen[i] > 0 ? log(1.0*nrows/collen[i]) : 0.0);
}
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++)
rowval[j] *= cscale[rowind[j]];
}
}
gk_free((void **)&cscale, &collen, LTERM);
break;
case GK_CSR_IDF2: /* TF' = TF*IDF */
ncols = mat->ncols;
cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++)
collen[rowind[j]]++;
}
nnzcols = 0;
#pragma omp parallel if (ncols > OMPMINOPS)
{
#pragma omp for schedule(static) reduction(+:nnzcols)
for (i=0; i<ncols; i++)
nnzcols += (collen[i] > 0 ? 1 : 0);
bgfreq = gk_max(10, (ssize_t)(.5*rowptr[nrows]/nnzcols));
printf("nnz: %zd, nnzcols: %d, bgfreq: %d\n", rowptr[nrows], nnzcols, bgfreq);
#pragma omp for schedule(static)
for (i=0; i<ncols; i++)
cscale[i] = (collen[i] > 0 ? log(1.0*(nrows+2*bgfreq)/(bgfreq+collen[i])) : 0.0);
}
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++)
rowval[j] *= cscale[rowind[j]];
}
}
gk_free((void **)&cscale, &collen, LTERM);
break;
default:
gk_errexit(SIGERR, "Unknown scaling type of %d\n", type);
}
}
/*************************************************************************/
/*! Computes the sums of the rows/columns
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which
sums to compute.
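
Example (a minimal sketch): cache the row sums in mat->rsums, as
required by GK_CSR_MIN searches in gk_csr_GetSimilarRows():

gk_csr_ComputeSums(mat, GK_CSR_ROW);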
*/
/**************************************************************************/
void gk_csr_ComputeSums(gk_csr_t *mat, int what)
{
ssize_t i;
int n;
ssize_t *ptr;
float *val, *sums;
switch (what) {
case GK_CSR_ROW:
n = mat->nrows;
ptr = mat->rowptr;
val = mat->rowval;
if (mat->rsums)
gk_free((void **)&mat->rsums, LTERM);
sums = mat->rsums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
break;
case GK_CSR_COL:
n = mat->ncols;
ptr = mat->colptr;
val = mat->colval;
if (mat->csums)
gk_free((void **)&mat->csums, LTERM);
sums = mat->csums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
break;
default:
gk_errexit(SIGERR, "Invalid sum type of %d.\n", what);
return;
}
#pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
for (i=0; i<n; i++)
sums[i] = gk_fsum(ptr[i+1]-ptr[i], val+ptr[i], 1);
}
/*************************************************************************/
/*! Computes the squared norms of the rows/columns
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which
squared norms to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSquaredNorms(gk_csr_t *mat, int what)
{
ssize_t i;
int n;
ssize_t *ptr;
float *val, *norms;
switch (what) {
case GK_CSR_ROW:
n = mat->nrows;
ptr = mat->rowptr;
val = mat->rowval;
if (mat->rnorms) gk_free((void **)&mat->rnorms, LTERM);
norms = mat->rnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: norms");
break;
case GK_CSR_COL:
n = mat->ncols;
ptr = mat->colptr;
val = mat->colval;
if (mat->cnorms) gk_free((void **)&mat->cnorms, LTERM);
norms = mat->cnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: norms");
break;
default:
gk_errexit(SIGERR, "Invalid norm type of %d.\n", what);
return;
}
#pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
for (i=0; i<n; i++)
norms[i] = gk_fdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1);
}
/*************************************************************************/
/*! Computes the similarity between two rows/columns
\param mat the matrix itself. The routine assumes that the indices
are sorted in increasing order.
\param i1 is the first row/column,
\param i2 is the second row/column,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating the type of
objects between which the similarity will be computed,
\param simtype is the type of similarity and is one of GK_CSR_COS,
GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
\returns the similarity between the two rows/columns.
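
Example (a minimal sketch; assumes the row indices were sorted with
gk_csr_SortIndices(mat, GK_CSR_ROW)): cosine similarity of rows 0 and 5:

float sim = gk_csr_ComputeSimilarity(mat, 0, 5, GK_CSR_ROW, GK_CSR_COS);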
*/
/**************************************************************************/
float gk_csr_ComputeSimilarity(gk_csr_t *mat, int i1, int i2, int what, int simtype)
{
int nind1, nind2;
int *ind1, *ind2;
float *val1, *val2, stat1, stat2, sim;
switch (what) {
case GK_CSR_ROW:
if (!mat->rowptr)
gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
nind1 = mat->rowptr[i1+1]-mat->rowptr[i1];
nind2 = mat->rowptr[i2+1]-mat->rowptr[i2];
ind1 = mat->rowind + mat->rowptr[i1];
ind2 = mat->rowind + mat->rowptr[i2];
val1 = mat->rowval + mat->rowptr[i1];
val2 = mat->rowval + mat->rowptr[i2];
break;
case GK_CSR_COL:
if (!mat->colptr)
gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
nind1 = mat->colptr[i1+1]-mat->colptr[i1];
nind2 = mat->colptr[i2+1]-mat->colptr[i2];
ind1 = mat->colind + mat->colptr[i1];
ind2 = mat->colind + mat->colptr[i2];
val1 = mat->colval + mat->colptr[i1];
val2 = mat->colval + mat->colptr[i2];
break;
default:
gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
return 0.0;
}
switch (simtype) {
case GK_CSR_COS:
case GK_CSR_JAC:
sim = stat1 = stat2 = 0.0;
i1 = i2 = 0;
while (i1<nind1 && i2<nind2) {
if (i1 == nind1) {
stat2 += val2[i2]*val2[i2];
i2++;
}
else if (i2 == nind2) {
stat1 += val1[i1]*val1[i1];
i1++;
}
else if (ind1[i1] < ind2[i2]) {
stat1 += val1[i1]*val1[i1];
i1++;
}
else if (ind1[i1] > ind2[i2]) {
stat2 += val2[i2]*val2[i2];
i2++;
}
else {
sim += val1[i1]*val2[i2];
stat1 += val1[i1]*val1[i1];
stat2 += val2[i2]*val2[i2];
i1++;
i2++;
}
}
if (simtype == GK_CSR_COS)
sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0);
else
sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
break;
case GK_CSR_MIN:
sim = stat1 = stat2 = 0.0;
i1 = i2 = 0;
while (i1<nind1 && i2<nind2) {
if (i1 == nind1) {
stat2 += val2[i2];
i2++;
}
else if (i2 == nind2) {
stat1 += val1[i1];
i1++;
}
else if (ind1[i1] < ind2[i2]) {
stat1 += val1[i1];
i1++;
}
else if (ind1[i1] > ind2[i2]) {
stat2 += val2[i2];
i2++;
}
else {
sim += gk_min(val1[i1],val2[i2]);
stat1 += val1[i1];
stat2 += val2[i2];
i1++;
i2++;
}
}
sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
break;
case GK_CSR_AMIN:
sim = stat1 = stat2 = 0.0;
i1 = i2 = 0;
while (i1<nind1 && i2<nind2) {
if (i1 == nind1) {
stat2 += val2[i2];
i2++;
}
else if (i2 == nind2) {
stat1 += val1[i1];
i1++;
}
else if (ind1[i1] < ind2[i2]) {
stat1 += val1[i1];
i1++;
}
else if (ind1[i1] > ind2[i2]) {
stat2 += val2[i2];
i2++;
}
else {
sim += gk_min(val1[i1],val2[i2]);
stat1 += val1[i1];
stat2 += val2[i2];
i1++;
i2++;
}
}
sim = (stat1 > 0.0 ? sim/stat1 : 0.0);
break;
default:
gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
return -1;
}
return sim;
}
/*************************************************************************/
/*! Finds the n most similar rows (neighbors) to the query using the
specified similarity measure.
\param mat the matrix itself
\param nqterms is the number of columns in the query
\param qind is the list of query columns
\param qval is the list of corresponding query weights
\param simtype is the type of similarity and is one of GK_CSR_COS,
GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
\param nsim is the maximum number of requested most similar rows.
If -1 is provided, then everything is returned unsorted.
\param minsim is the minimum similarity of the requested most
similar rows
\param hits is the result set. This array should be at least
of length nsim.
\param i_marker is an array of size equal to the number of rows
whose values are initialized to -1. If NULL is provided
then this array is allocated and freed internally.
\param i_cand is an array of size equal to the number of rows.
If NULL is provided then this array is allocated and freed
internally.
\returns the number of identified most similar rows, which can be
smaller than the requested number nsim when there are not
sufficiently many neighbors.
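
This routine requires the column-based view of the matrix (see
gk_csr_CreateIndex()); in addition, GK_CSR_JAC uses mat->rnorms (see
gk_csr_ComputeSquaredNorms()) and GK_CSR_MIN uses mat->rsums (see
gk_csr_ComputeSums()). For GK_CSR_COS the keys are plain dot products,
so the rows and the query are assumed to be unit length.

Example (a minimal sketch): find up to 10 rows with cosine similarity
of at least 0.25 to the first row of the same matrix:

gk_fkv_t hits[10];
int nhits = gk_csr_GetSimilarRows(mat,
(int)(mat->rowptr[1]-mat->rowptr[0]), mat->rowind, mat->rowval,
GK_CSR_COS, 10, 0.25, hits, NULL, NULL);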
*/
/**************************************************************************/
int gk_csr_GetSimilarRows(gk_csr_t *mat, int nqterms, int *qind,
float *qval, int simtype, int nsim, float minsim, gk_fkv_t *hits,
int *i_marker, gk_fkv_t *i_cand)
{
ssize_t i, ii, j, k;
int nrows, ncols, ncand;
ssize_t *colptr;
int *colind, *marker;
float *colval, *rnorms, mynorm, *rsums, mysum;
gk_fkv_t *cand;
if (nqterms == 0)
return 0;
nrows = mat->nrows;
ncols = mat->ncols;
colptr = mat->colptr;
colind = mat->colind;
colval = mat->colval;
marker = (i_marker ? i_marker : gk_ismalloc(nrows, -1, "gk_csr_GetSimilarRows: marker"));
cand = (i_cand ? i_cand : gk_fkvmalloc(nrows, "gk_csr_GetSimilarRows: cand"));
switch (simtype) {
case GK_CSR_COS:
for (ncand=0, ii=0; ii<nqterms; ii++) {
i = qind[ii];
if (i < ncols) {
for (j=colptr[i]; j<colptr[i+1]; j++) {
k = colind[j];
if (marker[k] == -1) {
cand[ncand].val = k;
cand[ncand].key = 0;
marker[k] = ncand++;
}
cand[marker[k]].key += colval[j]*qval[ii];
}
}
}
break;
case GK_CSR_JAC:
for (ncand=0, ii=0; ii<nqterms; ii++) {
i = qind[ii];
if (i < ncols) {
for (j=colptr[i]; j<colptr[i+1]; j++) {
k = colind[j];
if (marker[k] == -1) {
cand[ncand].val = k;
cand[ncand].key = 0;
marker[k] = ncand++;
}
cand[marker[k]].key += colval[j]*qval[ii];
}
}
}
rnorms = mat->rnorms;
mynorm = gk_fdot(nqterms, qval, 1, qval, 1);
for (i=0; i<ncand; i++)
cand[i].key = cand[i].key/(rnorms[cand[i].val]+mynorm-cand[i].key);
break;
case GK_CSR_MIN:
for (ncand=0, ii=0; ii<nqterms; ii++) {
i = qind[ii];
if (i < ncols) {
for (j=colptr[i]; j<colptr[i+1]; j++) {
k = colind[j];
if (marker[k] == -1) {
cand[ncand].val = k;
cand[ncand].key = 0;
marker[k] = ncand++;
}
cand[marker[k]].key += gk_min(colval[j], qval[ii]);
}
}
}
rsums = mat->rsums;
mysum = gk_fsum(nqterms, qval, 1);
for (i=0; i<ncand; i++)
cand[i].key = cand[i].key/(rsums[cand[i].val]+mysum-cand[i].key);
break;
/* Asymmetric MIN similarity */
case GK_CSR_AMIN:
for (ncand=0, ii=0; ii<nqterms; ii++) {
i = qind[ii];
if (i < ncols) {
for (j=colptr[i]; j<colptr[i+1]; j++) {
k = colind[j];
if (marker[k] == -1) {
cand[ncand].val = k;
cand[ncand].key = 0;
marker[k] = ncand++;
}
cand[marker[k]].key += gk_min(colval[j], qval[ii]);
}
}
}
mysum = gk_fsum(nqterms, qval, 1);
for (i=0; i<ncand; i++)
cand[i].key = cand[i].key/mysum;
break;
default:
gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
return -1;
}
/* go and prune the hits that are below minsim */
for (j=0, i=0; i<ncand; i++) {
marker[cand[i].val] = -1;
if (cand[i].key >= minsim)
cand[j++] = cand[i];
}
ncand = j;
if (nsim == -1 || nsim >= ncand) {
nsim = ncand;
}
else {
nsim = gk_min(nsim, ncand);
gk_dfkvkselect(ncand, nsim, cand);
gk_fkvsortd(nsim, cand);
}
gk_fkvcopy(nsim, cand, hits);
if (i_marker == NULL)
gk_free((void **)&marker, LTERM);
if (i_cand == NULL)
gk_free((void **)&cand, LTERM);
return nsim;
}
|
convolution_sgemm_pack8to1_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_pack8to1_int8_msa(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
// permute
Mat tmp;
if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
{
int remain_size_start = 0;
int nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
int64_t* tmpptr = tmp.channel(i / 2);
for (int q = 0; q < inch; q++)
{
const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
v16i8 _v = __msa_ld_b(img0, 0);
__msa_st_b(_v, tmpptr, 0);
tmpptr += 2;
img0 += size;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
int64_t* tmpptr = tmp.channel(i / 2 + i % 2);
for (int q = 0; q < inch; q++)
{
const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
int* outptr0 = top_blob.channel(p);
int* outptr1 = top_blob.channel(p + 1);
int* outptr2 = top_blob.channel(p + 2);
int* outptr3 = top_blob.channel(p + 3);
int i = 0;
for (; i + 1 < size; i += 2)
{
const signed char* tmpptr = tmp.channel(i / 2);
const signed char* kptr = kernel.channel(p / 4);
int nn = inch * maxk; // inch always > 0
v4i32 _sum00 = __msa_fill_w(0);
v4i32 _sum01 = __msa_fill_w(0);
v4i32 _sum02 = __msa_fill_w(0);
v4i32 _sum03 = __msa_fill_w(0);
v4i32 _sum10 = __msa_fill_w(0);
v4i32 _sum11 = __msa_fill_w(0);
v4i32 _sum12 = __msa_fill_w(0);
v4i32 _sum13 = __msa_fill_w(0);
int j = 0;
for (; j < nn; j++)
{
__builtin_prefetch(tmpptr + 64);
__builtin_prefetch(kptr + 128);
v16i8 _val01 = __msa_ld_b(tmpptr, 0);
v16i8 _extval01 = __msa_clti_s_b(_val01, 0);
v8i16 _val0 = (v8i16)__msa_ilvr_b(_extval01, _val01);
v8i16 _val1 = (v8i16)__msa_ilvl_b(_extval01, _val01);
v16i8 _w01 = __msa_ld_b(kptr, 0);
v16i8 _w23 = __msa_ld_b(kptr + 16, 0);
v16i8 _extw01 = __msa_clti_s_b(_w01, 0);
v16i8 _extw23 = __msa_clti_s_b(_w23, 0);
v8i16 _w0 = (v8i16)__msa_ilvr_b(_extw01, _w01);
v8i16 _w1 = (v8i16)__msa_ilvl_b(_extw01, _w01);
v8i16 _w2 = (v8i16)__msa_ilvr_b(_extw23, _w23);
v8i16 _w3 = (v8i16)__msa_ilvl_b(_extw23, _w23);
v8i16 _s00 = __msa_mulv_h(_val0, _w0);
v8i16 _s01 = __msa_mulv_h(_val0, _w1);
v8i16 _s02 = __msa_mulv_h(_val0, _w2);
v8i16 _s03 = __msa_mulv_h(_val0, _w3);
v8i16 _s10 = __msa_mulv_h(_val1, _w0);
v8i16 _s11 = __msa_mulv_h(_val1, _w1);
v8i16 _s12 = __msa_mulv_h(_val1, _w2);
v8i16 _s13 = __msa_mulv_h(_val1, _w3);
_sum00 = __msa_addv_w(_sum00, __msa_hadd_s_w(_s00, _s00));
_sum01 = __msa_addv_w(_sum01, __msa_hadd_s_w(_s01, _s01));
_sum02 = __msa_addv_w(_sum02, __msa_hadd_s_w(_s02, _s02));
_sum03 = __msa_addv_w(_sum03, __msa_hadd_s_w(_s03, _s03));
_sum10 = __msa_addv_w(_sum10, __msa_hadd_s_w(_s10, _s10));
_sum11 = __msa_addv_w(_sum11, __msa_hadd_s_w(_s11, _s11));
_sum12 = __msa_addv_w(_sum12, __msa_hadd_s_w(_s12, _s12));
_sum13 = __msa_addv_w(_sum13, __msa_hadd_s_w(_s13, _s13));
tmpptr += 16;
kptr += 32;
}
// transpose 4x4
{
v4i32 _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = __msa_ilvr_w(_sum01, _sum00);
_tmp1 = __msa_ilvr_w(_sum03, _sum02);
_tmp2 = __msa_ilvl_w(_sum01, _sum00);
_tmp3 = __msa_ilvl_w(_sum03, _sum02);
_sum00 = (v4i32)__msa_ilvr_d((v2i64)_tmp1, (v2i64)_tmp0);
_sum01 = (v4i32)__msa_ilvl_d((v2i64)_tmp1, (v2i64)_tmp0);
_sum02 = (v4i32)__msa_ilvr_d((v2i64)_tmp3, (v2i64)_tmp2);
_sum03 = (v4i32)__msa_ilvl_d((v2i64)_tmp3, (v2i64)_tmp2);
}
{
v4i32 _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = __msa_ilvr_w(_sum11, _sum10);
_tmp1 = __msa_ilvr_w(_sum13, _sum12);
_tmp2 = __msa_ilvl_w(_sum11, _sum10);
_tmp3 = __msa_ilvl_w(_sum13, _sum12);
_sum10 = (v4i32)__msa_ilvr_d((v2i64)_tmp1, (v2i64)_tmp0);
_sum11 = (v4i32)__msa_ilvl_d((v2i64)_tmp1, (v2i64)_tmp0);
_sum12 = (v4i32)__msa_ilvr_d((v2i64)_tmp3, (v2i64)_tmp2);
_sum13 = (v4i32)__msa_ilvl_d((v2i64)_tmp3, (v2i64)_tmp2);
}
_sum00 = __msa_addv_w(_sum00, _sum01);
_sum02 = __msa_addv_w(_sum02, _sum03);
_sum10 = __msa_addv_w(_sum10, _sum11);
_sum12 = __msa_addv_w(_sum12, _sum13);
_sum00 = __msa_addv_w(_sum00, _sum02);
_sum10 = __msa_addv_w(_sum10, _sum12);
int sum[8];
__msa_st_w(_sum00, sum, 0);
__msa_st_w(_sum10, sum + 4, 0);
outptr0[0] = sum[0];
outptr1[0] = sum[1];
outptr2[0] = sum[2];
outptr3[0] = sum[3];
outptr0[1] = sum[4];
outptr1[1] = sum[5];
outptr2[1] = sum[6];
outptr3[1] = sum[7];
outptr0 += 2;
outptr1 += 2;
outptr2 += 2;
outptr3 += 2;
}
for (; i < size; i++)
{
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
const signed char* kptr = kernel.channel(p / 4);
int nn = inch * maxk; // inch always > 0
v4i32 _sum0 = __msa_fill_w(0);
v4i32 _sum1 = __msa_fill_w(0);
v4i32 _sum2 = __msa_fill_w(0);
v4i32 _sum3 = __msa_fill_w(0);
int j = 0;
for (; j < nn; j++)
{
__builtin_prefetch(tmpptr + 32);
__builtin_prefetch(kptr + 128);
v16i8 _val = __msa_ld_b(tmpptr, 0);
v8i16 _val16 = (v8i16)__msa_ilvr_b(__msa_clti_s_b(_val, 0), _val);
v16i8 _w01 = __msa_ld_b(kptr, 0);
v16i8 _w23 = __msa_ld_b(kptr + 16, 0);
v16i8 _extw01 = __msa_clti_s_b(_w01, 0);
v16i8 _extw23 = __msa_clti_s_b(_w23, 0);
v8i16 _w0 = (v8i16)__msa_ilvr_b(_extw01, _w01);
v8i16 _w1 = (v8i16)__msa_ilvl_b(_extw01, _w01);
v8i16 _w2 = (v8i16)__msa_ilvr_b(_extw23, _w23);
v8i16 _w3 = (v8i16)__msa_ilvl_b(_extw23, _w23);
v8i16 _s0 = __msa_mulv_h(_val16, _w0);
v8i16 _s1 = __msa_mulv_h(_val16, _w1);
v8i16 _s2 = __msa_mulv_h(_val16, _w2);
v8i16 _s3 = __msa_mulv_h(_val16, _w3);
_sum0 = __msa_addv_w(_sum0, __msa_hadd_s_w(_s0, _s0));
_sum1 = __msa_addv_w(_sum1, __msa_hadd_s_w(_s1, _s1));
_sum2 = __msa_addv_w(_sum2, __msa_hadd_s_w(_s2, _s2));
_sum3 = __msa_addv_w(_sum3, __msa_hadd_s_w(_s3, _s3));
tmpptr += 8;
kptr += 32;
}
// transpose 4x4
{
v4i32 _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = __msa_ilvr_w(_sum1, _sum0);
_tmp1 = __msa_ilvr_w(_sum3, _sum2);
_tmp2 = __msa_ilvl_w(_sum1, _sum0);
_tmp3 = __msa_ilvl_w(_sum3, _sum2);
_sum0 = (v4i32)__msa_ilvr_d((v2i64)_tmp1, (v2i64)_tmp0);
_sum1 = (v4i32)__msa_ilvl_d((v2i64)_tmp1, (v2i64)_tmp0);
_sum2 = (v4i32)__msa_ilvr_d((v2i64)_tmp3, (v2i64)_tmp2);
_sum3 = (v4i32)__msa_ilvl_d((v2i64)_tmp3, (v2i64)_tmp2);
}
_sum0 = __msa_addv_w(_sum0, _sum1);
_sum2 = __msa_addv_w(_sum2, _sum3);
_sum0 = __msa_addv_w(_sum0, _sum2);
int sum[4];
__msa_st_w(_sum0, sum, 0);
outptr0[0] = sum[0];
outptr1[0] = sum[1];
outptr2[0] = sum[2];
outptr3[0] = sum[3];
outptr0 += 1;
outptr1 += 1;
outptr2 += 1;
outptr3 += 1;
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
int* outptr0 = top_blob.channel(p);
int i = 0;
for (; i + 1 < size; i += 2)
{
const signed char* tmpptr = tmp.channel(i / 2);
const signed char* kptr = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
v4i32 _sum0 = __msa_fill_w(0);
v4i32 _sum1 = __msa_fill_w(0);
int j = 0;
for (; j < nn; j++)
{
__builtin_prefetch(tmpptr + 64);
__builtin_prefetch(kptr + 32);
v16i8 _val01 = __msa_ld_b(tmpptr, 0);
v16i8 _extval01 = __msa_clti_s_b(_val01, 0);
v8i16 _val0 = (v8i16)__msa_ilvr_b(_extval01, _val01);
v8i16 _val1 = (v8i16)__msa_ilvl_b(_extval01, _val01);
v16i8 _w = __msa_ld_b(kptr, 0);
v8i16 _w16 = (v8i16)__msa_ilvr_b(__msa_clti_s_b(_w, 0), _w);
v8i16 _s0 = __msa_mulv_h(_val0, _w16);
v8i16 _s1 = __msa_mulv_h(_val1, _w16);
_sum0 = __msa_addv_w(_sum0, __msa_hadd_s_w(_s0, _s0));
_sum1 = __msa_addv_w(_sum1, __msa_hadd_s_w(_s1, _s1));
tmpptr += 16;
kptr += 8;
}
outptr0[0] = __msa_reduce_add_w(_sum0);
outptr0[1] = __msa_reduce_add_w(_sum1);
outptr0 += 2;
}
for (; i < size; i++)
{
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
const signed char* kptr = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
v4i32 _sum = __msa_fill_w(0);
int j = 0;
for (; j < nn; j++)
{
__builtin_prefetch(tmpptr + 32);
__builtin_prefetch(kptr + 32);
v16i8 _val = __msa_ld_b(tmpptr, 0);
v8i16 _val16 = (v8i16)__msa_ilvr_b(__msa_clti_s_b(_val, 0), _val);
v16i8 _w = __msa_ld_b(kptr, 0);
v8i16 _w16 = (v8i16)__msa_ilvr_b(__msa_clti_s_b(_w, 0), _w);
v8i16 _s0 = __msa_mulv_h(_val16, _w16);
_sum = __msa_addv_w(_sum, __msa_hadd_s_w(_s0, _s0));
tmpptr += 8;
kptr += 8;
}
outptr0[0] = __msa_reduce_add_w(_sum);
outptr0 += 1;
}
}
}
static void convolution_im2col_sgemm_transform_kernel_pack8to1_int8_msa(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = 8a-4b-maxk-inch/8a-outch/4b
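// i.e., for every kernel position, the weights of 8 consecutive input
// channels for each of 4 consecutive output channels are laid out
// contiguously, matching the two 16-byte kptr loads per iteration in the
// pack8to1 GEMM above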
Mat kernel = _kernel.reshape(maxk, inch, outch);
if (outch >= 4)
kernel_tm.create(32 * maxk, inch / 8, outch / 4 + outch % 4, (size_t)1u);
else
kernel_tm.create(8 * maxk, inch / 8, outch, (size_t)1u);
int q = 0;
for (; q + 3 < outch; q += 4)
{
signed char* g00 = kernel_tm.channel(q / 4);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 8; j++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
}
}
}
// TODO unroll 2
for (; q < outch; q++)
{
signed char* g00 = kernel_tm.channel(q / 4 + q % 4);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int k = 0; k < maxk; k++)
{
for (int j = 0; j < 8; j++)
{
const signed char* k00 = kernel.channel(q).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
}
}
}
static void convolution_im2col_sgemm_pack8to1_int8_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = kernel_w * kernel_h;
// im2col
Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
{
const int gap = w * stride_h - outw * stride_w;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
int64_t* ptr = bottom_im2col.channel(p);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
const int64_t* sptr = img.row<const int64_t>(dilation_h * u) + dilation_w * v;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
ptr[0] = sptr[0];
sptr += stride_w;
ptr += 1;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_pack8to1_int8_msa(bottom_im2col, top_blob, kernel, opt);
}
|
constitute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO N N SSSSS TTTTT IIIII TTTTT U U TTTTT EEEEE %
% C O O NN N SS T I T U U T E %
% C O O N N N ESSS T I T U U T EEE %
% C O O N NN SS T I T U U T E %
% CCCC OOO N N SSSSS T IIIII T UUU T EEEEE %
% %
% %
% MagickCore Methods to Constitute an Image %
% %
% Software Design %
% Cristy %
% October 1998 %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/client.h"
#include "MagickCore/coder-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/constitute-private.h"
#include "MagickCore/delegate.h"
#include "MagickCore/geometry.h"
#include "MagickCore/identify.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/statistic.h"
#include "MagickCore/stream.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
Typedef declarations.
*/
typedef struct _ConstituteInfo
{
const char
*caption,
*comment,
*dispose,
*label;
MagickBooleanType
sync_from_exif,
sync_from_tiff;
MagickStatusType
delay_flags;
size_t
delay;
ssize_t
ticks_per_second;
} ConstituteInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n s t i t u t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConstituteImage() returns an image from the pixel data you supply.
% The pixel data must be in scanline order top-to-bottom. The data can be
% char, short int, int, float, or double. Float and double require the
% pixels to be normalized [0..1], otherwise [0..QuantumRange]. For example, to
% create a 640x480 image from unsigned red-green-blue character data, use:
%
% image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception);
%
% The format of the ConstituteImage method is:
%
% Image *ConstituteImage(const size_t columns,const size_t rows,
% const char *map,const StorageType storage,const void *pixels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: width in pixels of the image.
%
% o rows: height in pixels of the image.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose
% from these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
% LongPixel, QuantumPixel, or ShortPixel.
%
% o pixels: This array of values contains the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConstituteImage(const size_t columns,const size_t rows,
const char *map,const StorageType storage,const void *pixels,
ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
status;
ssize_t
i;
size_t
length;
/*
Allocate image structure.
*/
assert(map != (const char *) NULL);
assert(pixels != (void *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map);
image=AcquireImage((ImageInfo *) NULL,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
switch (storage)
{
case CharPixel: image->depth=8*sizeof(unsigned char); break;
case DoublePixel: image->depth=8*sizeof(double); break;
case FloatPixel: image->depth=8*sizeof(float); break;
case LongPixel: image->depth=8*sizeof(unsigned long); break;
case LongLongPixel: image->depth=8*sizeof(MagickSizeType); break;
case ShortPixel: image->depth=8*sizeof(unsigned short); break;
default: break;
}
length=strlen(map);
for (i=0; i < (ssize_t) length; i++)
{
switch (map[i])
{
case 'a':
case 'A':
case 'O':
case 'o':
{
image->alpha_trait=BlendPixelTrait;
break;
}
case 'C':
case 'c':
case 'm':
case 'M':
case 'Y':
case 'y':
case 'K':
case 'k':
{
image->colorspace=CMYKColorspace;
break;
}
case 'I':
case 'i':
{
image->colorspace=GRAYColorspace;
break;
}
default:
{
if (length == 1)
image->colorspace=GRAYColorspace;
break;
}
}
}
status=SetImageExtent(image,columns,rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImage() returns all the properties of an image or image sequence
% except for the pixels. It is much faster and consumes far less memory
% than ReadImage(). On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the PingImage method is:
%
% Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Ping the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
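%  For example (a minimal sketch), to ping an image and write its
%  identification to stdout:
%
%    image=PingImage(image_info,exception);
%    if (image != (Image *) NULL)
%      {
%        (void) IdentifyImage(image,stdout,MagickFalse,exception);
%        image=DestroyImage(image);
%      }
%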
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static size_t PingStream(const Image *magick_unused(image),
const void *magick_unused(pixels),const size_t columns)
{
magick_unreferenced(image);
magick_unreferenced(pixels);
return(columns);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
MagickExport Image *PingImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
Image
*image;
ImageInfo
*ping_info;
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
ping_info=CloneImageInfo(image_info);
ping_info->ping=MagickTrue;
image=ReadStream(ping_info,&PingStream,exception);
if (image != (Image *) NULL)
{
ResetTimer(&image->timer);
if (ping_info->verbose != MagickFalse)
(void) IdentifyImage(image,stdout,MagickFalse,exception);
}
ping_info=DestroyImageInfo(ping_info);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImages() pings one or more images and returns them as an image list.
%
% The format of the PingImages method is:
%
% Image *PingImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PingImages(ImageInfo *image_info,const char *filename,
ExceptionInfo *exception)
{
char
ping_filename[MagickPathExtent];
Image
*image,
*images;
ImageInfo
*read_info;
/*
Ping image list from a file.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
(void) SetImageOption(image_info,"filename",filename);
(void) CopyMagickString(image_info->filename,filename,MagickPathExtent);
(void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename,
(int) image_info->scene,ping_filename,exception);
if (LocaleCompare(ping_filename,image_info->filename) != 0)
{
ExceptionInfo
*sans;
ssize_t
extent,
scene;
/*
Images of the form image-%d.png[1-5].
*/
read_info=CloneImageInfo(image_info);
sans=AcquireExceptionInfo();
(void) SetImageInfo(read_info,0,sans);
sans=DestroyExceptionInfo(sans);
if (read_info->number_scenes == 0)
{
read_info=DestroyImageInfo(read_info);
return(PingImage(image_info,exception));
}
(void) CopyMagickString(ping_filename,read_info->filename,
MagickPathExtent);
images=NewImageList();
extent=(ssize_t) (read_info->scene+read_info->number_scenes);
for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++)
{
(void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename,
(int) scene,read_info->filename,exception);
image=PingImage(read_info,exception);
if (image == (Image *) NULL)
continue;
AppendImageToList(&images,image);
}
read_info=DestroyImageInfo(read_info);
return(images);
}
return(PingImage(image_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImage() reads an image or image sequence from a file or file handle.
% On failure (for example, a memory shortage or an unreadable image), a NULL
% image is returned and exception describes the reason for the failure.
%
% The format of the ReadImage method is:
%
% Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Read the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType IsCoderAuthorized(const char *coder,
const PolicyRights rights,ExceptionInfo *exception)
{
if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) == MagickFalse)
{
errno=EPERM;
(void) ThrowMagickException(exception,GetMagickModule(),PolicyError,
"NotAuthorized","`%s'",coder);
return(MagickFalse);
}
return(MagickTrue);
}
static void InitializeConstituteInfo(const ImageInfo *image_info,
ConstituteInfo *constitute_info)
{
const char
*option;
memset(constitute_info,0,sizeof(*constitute_info));
constitute_info->sync_from_exif=MagickTrue;
constitute_info->sync_from_tiff=MagickTrue;
option=GetImageOption(image_info,"exif:sync-image");
if (IsStringFalse(option) != MagickFalse)
constitute_info->sync_from_exif=MagickFalse;
option=GetImageOption(image_info,"tiff:sync-image");
if (IsStringFalse(option) != MagickFalse)
constitute_info->sync_from_tiff=MagickFalse;
constitute_info->caption=GetImageOption(image_info,"caption");
constitute_info->comment=GetImageOption(image_info,"comment");
constitute_info->label=GetImageOption(image_info,"label");
option=GetImageOption(image_info,"delay");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;
constitute_info->delay_flags=ParseGeometry(option,&geometry_info);
if (constitute_info->delay_flags != NoValue)
{
constitute_info->delay=floor(geometry_info.rho+0.5);
if ((constitute_info->delay_flags & SigmaValue) != 0)
constitute_info->ticks_per_second=CastDoubleToLong(floor(
geometry_info.sigma+0.5));
}
}
}
static void SyncOrientationFromProperties(Image *image,
ConstituteInfo *constitute_info,ExceptionInfo *exception)
{
const char
*orientation;
orientation=(const char *) NULL;
if (constitute_info->sync_from_exif != MagickFalse)
{
orientation=GetImageProperty(image,"exif:Orientation",exception);
if (orientation != (const char *) NULL)
{
image->orientation=(OrientationType) StringToLong(orientation);
(void) DeleteImageProperty(image,"exif:Orientation");
}
}
if ((orientation == (const char *) NULL) &&
(constitute_info->sync_from_tiff != MagickFalse))
{
orientation=GetImageProperty(image,"tiff:Orientation",exception);
if (orientation != (const char *) NULL)
{
image->orientation=(OrientationType) StringToLong(orientation);
(void) DeleteImageProperty(image,"tiff:Orientation");
}
}
}
static void SyncResolutionFromProperties(Image *image,
ConstituteInfo *constitute_info, ExceptionInfo *exception)
{
const char
*resolution_x,
*resolution_y,
*resolution_units;
MagickBooleanType
used_tiff;
resolution_x=(const char *) NULL;
resolution_y=(const char *) NULL;
resolution_units=(const char *) NULL;
used_tiff=MagickFalse;
if (constitute_info->sync_from_exif != MagickFalse)
{
resolution_x=GetImageProperty(image,"exif:XResolution",exception);
resolution_y=GetImageProperty(image,"exif:YResolution",exception);
if ((resolution_x != (const char *) NULL) &&
(resolution_y != (const char *) NULL))
resolution_units=GetImageProperty(image,"exif:ResolutionUnit",
exception);
}
if ((resolution_x == (const char *) NULL) &&
(resolution_y == (const char *) NULL) &&
(constitute_info->sync_from_tiff != MagickFalse))
{
resolution_x=GetImageProperty(image,"tiff:XResolution",exception);
resolution_y=GetImageProperty(image,"tiff:YResolution",exception);
if ((resolution_x != (const char *) NULL) &&
(resolution_y != (const char *) NULL))
{
used_tiff=MagickTrue;
resolution_units=GetImageProperty(image,"tiff:ResolutionUnit",
exception);
}
}
if ((resolution_x != (const char *) NULL) &&
(resolution_y != (const char *) NULL))
{
GeometryInfo
geometry_info;
ssize_t
option_type;
geometry_info.rho=image->resolution.x;
geometry_info.sigma=1.0;
(void) ParseGeometry(resolution_x,&geometry_info);
if (geometry_info.sigma != 0)
image->resolution.x=geometry_info.rho/geometry_info.sigma;
if (strchr(resolution_x,',') != (char *) NULL)
image->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0;
geometry_info.rho=image->resolution.y;
geometry_info.sigma=1.0;
(void) ParseGeometry(resolution_y,&geometry_info);
if (geometry_info.sigma != 0)
image->resolution.y=geometry_info.rho/geometry_info.sigma;
if (strchr(resolution_y,',') != (char *) NULL)
image->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0;
if (resolution_units != (char *) NULL)
{
option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse,
resolution_units);
if (option_type >= 0)
image->units=(ResolutionType) option_type;
}
if (used_tiff == MagickFalse)
{
(void) DeleteImageProperty(image,"exif:XResolution");
(void) DeleteImageProperty(image,"exif:YResolution");
(void) DeleteImageProperty(image,"exif:ResolutionUnit");
}
else
{
(void) DeleteImageProperty(image,"tiff:XResolution");
(void) DeleteImageProperty(image,"tiff:YResolution");
(void) DeleteImageProperty(image,"tiff:ResolutionUnit");
}
}
}
MagickExport Image *ReadImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
char
filename[MagickPathExtent],
magick[MagickPathExtent],
magick_filename[MagickPathExtent];
ConstituteInfo
constitute_info;
const DelegateInfo
*delegate_info;
const MagickInfo
*magick_info;
DecodeImageHandler
*decoder;
ExceptionInfo
*sans_exception;
Image
*image,
*next;
ImageInfo
*read_info;
MagickBooleanType
status;
/*
Determine image type from filename prefix or suffix (e.g. image.jpg).
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image_info->filename != (char *) NULL);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
read_info=CloneImageInfo(image_info);
(void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent);
(void) SetImageInfo(read_info,0,exception);
(void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
(void) CopyMagickString(magick,read_info->magick,MagickPathExtent);
/*
Call appropriate image reader based on image type.
*/
sans_exception=AcquireExceptionInfo();
magick_info=GetMagickInfo(read_info->magick,sans_exception);
if (sans_exception->severity == PolicyError)
InheritException(exception,sans_exception);
sans_exception=DestroyExceptionInfo(sans_exception);
if (magick_info != (const MagickInfo *) NULL)
{
if (GetMagickEndianSupport(magick_info) == MagickFalse)
read_info->endian=UndefinedEndian;
else
if ((image_info->endian == UndefinedEndian) &&
(GetMagickRawSupport(magick_info) != MagickFalse))
{
unsigned long
lsb_first;
lsb_first=1;
read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian :
MSBEndian;
}
}
if ((magick_info != (const MagickInfo *) NULL) &&
(GetMagickDecoderSeekableStream(magick_info) != MagickFalse))
{
image=AcquireImage(read_info,exception);
(void) CopyMagickString(image->filename,read_info->filename,
MagickPathExtent);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
read_info=DestroyImageInfo(read_info);
image=DestroyImage(image);
return((Image *) NULL);
}
if (IsBlobSeekable(image) == MagickFalse)
{
/*
Coder requires a seekable stream.
*/
*read_info->filename='\0';
status=ImageToFile(image,read_info->filename,exception);
if (status == MagickFalse)
{
(void) CloseBlob(image);
read_info=DestroyImageInfo(read_info);
image=DestroyImage(image);
return((Image *) NULL);
}
read_info->temporary=MagickTrue;
}
(void) CloseBlob(image);
image=DestroyImage(image);
}
image=NewImageList();
decoder=GetImageDecoder(magick_info);
if (decoder == (DecodeImageHandler *) NULL)
{
delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
if (delegate_info == (const DelegateInfo *) NULL)
{
(void) SetImageInfo(read_info,0,exception);
(void) CopyMagickString(read_info->filename,filename,
MagickPathExtent);
magick_info=GetMagickInfo(read_info->magick,exception);
decoder=GetImageDecoder(magick_info);
}
}
if (decoder != (DecodeImageHandler *) NULL)
{
/*
Call appropriate image reader based on image type.
*/
if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
LockSemaphoreInfo(magick_info->semaphore);
status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
image=(Image *) NULL;
if (status != MagickFalse)
image=decoder(read_info,exception);
if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
UnlockSemaphoreInfo(magick_info->semaphore);
}
else
{
delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
if (delegate_info == (const DelegateInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
read_info->magick);
if (read_info->temporary != MagickFalse)
(void) RelinquishUniqueFileResource(read_info->filename);
read_info=DestroyImageInfo(read_info);
return((Image *) NULL);
}
/*
Let our decoding delegate process the image.
*/
image=AcquireImage(read_info,exception);
if (image == (Image *) NULL)
{
read_info=DestroyImageInfo(read_info);
return((Image *) NULL);
}
(void) CopyMagickString(image->filename,read_info->filename,
MagickPathExtent);
*read_info->filename='\0';
if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
LockSemaphoreInfo(delegate_info->semaphore);
status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL,
exception);
if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
UnlockSemaphoreInfo(delegate_info->semaphore);
image=DestroyImageList(image);
read_info->temporary=MagickTrue;
if (status != MagickFalse)
(void) SetImageInfo(read_info,0,exception);
magick_info=GetMagickInfo(read_info->magick,exception);
decoder=GetImageDecoder(magick_info);
if (decoder == (DecodeImageHandler *) NULL)
{
if (IsPathAccessible(read_info->filename) != MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
read_info->magick);
else
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
read_info->filename);
read_info=DestroyImageInfo(read_info);
return((Image *) NULL);
}
/*
Call appropriate image reader based on image type.
*/
if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
LockSemaphoreInfo(magick_info->semaphore);
status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
image=(Image *) NULL;
if (status != MagickFalse)
image=(decoder)(read_info,exception);
if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
UnlockSemaphoreInfo(magick_info->semaphore);
}
if (read_info->temporary != MagickFalse)
{
(void) RelinquishUniqueFileResource(read_info->filename);
read_info->temporary=MagickFalse;
if (image != (Image *) NULL)
(void) CopyMagickString(image->filename,filename,MagickPathExtent);
}
if (image == (Image *) NULL)
{
read_info=DestroyImageInfo(read_info);
return(image);
}
if (exception->severity >= ErrorException)
(void) LogMagickEvent(ExceptionEvent,GetMagickModule(),
"Coder (%s) generated an image despite an error (%d), "
"notify the developers",image->magick,exception->severity);
if (IsBlobTemporary(image) != MagickFalse)
(void) RelinquishUniqueFileResource(read_info->filename);
if ((IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse) &&
(GetImageListLength(image) != 1))
{
Image
*clones;
clones=CloneImages(image,read_info->scenes,exception);
if (clones != (Image *) NULL)
{
image=DestroyImageList(image);
image=GetFirstImageInList(clones);
}
}
InitializeConstituteInfo(read_info,&constitute_info);
for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
{
char
magick_path[MagickPathExtent],
*property;
const StringInfo
*profile;
static const char
*source_date_epoch = (const char *) NULL;
static MagickBooleanType
epoch_initialized = MagickFalse;
next->taint=MagickFalse;
GetPathComponent(magick_filename,MagickPath,magick_path);
if ((*magick_path == '\0') && (*next->magick == '\0'))
(void) CopyMagickString(next->magick,magick,MagickPathExtent);
(void) CopyMagickString(next->magick_filename,magick_filename,
MagickPathExtent);
if (IsBlobTemporary(image) != MagickFalse)
(void) CopyMagickString(next->filename,filename,MagickPathExtent);
if (next->magick_columns == 0)
next->magick_columns=next->columns;
if (next->magick_rows == 0)
next->magick_rows=next->rows;
(void) GetImageProperty(next,"exif:*",exception);
(void) GetImageProperty(next,"icc:*",exception);
(void) GetImageProperty(next,"iptc:*",exception);
(void) GetImageProperty(next,"xmp:*",exception);
SyncOrientationFromProperties(next,&constitute_info,exception);
SyncResolutionFromProperties(next,&constitute_info,exception);
if (next->page.width == 0)
next->page.width=next->columns;
if (next->page.height == 0)
next->page.height=next->rows;
if (constitute_info.caption != (const char *) NULL)
{
property=InterpretImageProperties(read_info,next,
constitute_info.caption,exception);
(void) SetImageProperty(next,"caption",property,exception);
property=DestroyString(property);
}
if (constitute_info.comment != (const char *) NULL)
{
property=InterpretImageProperties(read_info,next,
constitute_info.comment,exception);
(void) SetImageProperty(next,"comment",property,exception);
property=DestroyString(property);
}
if (constitute_info.label != (const char *) NULL)
{
property=InterpretImageProperties(read_info,next,
constitute_info.label,exception);
(void) SetImageProperty(next,"label",property,exception);
property=DestroyString(property);
}
if (LocaleCompare(next->magick,"TEXT") == 0)
(void) ParseAbsoluteGeometry("0x0+0+0",&next->page);
if ((read_info->extract != (char *) NULL) &&
(read_info->stream == (StreamHandler) NULL))
{
RectangleInfo
geometry;
MagickStatusType
flags;
SetGeometry(next,&geometry);
flags=ParseAbsoluteGeometry(read_info->extract,&geometry);
if ((next->columns != geometry.width) ||
(next->rows != geometry.height))
{
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
{
Image
*crop_image;
crop_image=CropImage(next,&geometry,exception);
if (crop_image != (Image *) NULL)
ReplaceImageInList(&next,crop_image);
}
else
if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0))
{
Image
*size_image;
flags=ParseRegionGeometry(next,read_info->extract,&geometry,
exception);
size_image=ResizeImage(next,geometry.width,geometry.height,
next->filter,exception);
if (size_image != (Image *) NULL)
ReplaceImageInList(&next,size_image);
}
}
}
profile=GetImageProfile(next,"icc");
if (profile == (const StringInfo *) NULL)
profile=GetImageProfile(next,"icm");
profile=GetImageProfile(next,"iptc");
if (profile == (const StringInfo *) NULL)
profile=GetImageProfile(next,"8bim");
if (epoch_initialized == MagickFalse)
{
source_date_epoch=getenv("SOURCE_DATE_EPOCH");
epoch_initialized=MagickTrue;
}
if (source_date_epoch == (const char *) NULL)
{
char
timestamp[MagickTimeExtent];
(void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime,
sizeof(timestamp),timestamp);
(void) SetImageProperty(next,"date:modify",timestamp,exception);
(void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime,
sizeof(timestamp),timestamp);
(void) SetImageProperty(next,"date:create",timestamp,exception);
}
if (constitute_info.delay_flags != NoValue)
{
if ((constitute_info.delay_flags & GreaterValue) != 0)
{
if (next->delay > constitute_info.delay)
next->delay=constitute_info.delay;
}
else
if ((constitute_info.delay_flags & LessValue) != 0)
{
if (next->delay < constitute_info.delay)
next->delay=constitute_info.delay;
}
else
next->delay=constitute_info.delay;
if ((constitute_info.delay_flags & SigmaValue) != 0)
next->ticks_per_second=constitute_info.ticks_per_second;
}
if (constitute_info.dispose != (const char *) NULL)
{
ssize_t
option_type;
option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse,
constitute_info.dispose);
if (option_type >= 0)
next->dispose=(DisposeType) option_type;
}
if (read_info->verbose != MagickFalse)
(void) IdentifyImage(next,stderr,MagickFalse,exception);
image=next;
}
read_info=DestroyImageInfo(read_info);
if (GetBlobError(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImages() reads one or more images and returns them as an image list.
%
% The format of the ReadImages method is:
%
% Image *ReadImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename,
ExceptionInfo *exception)
{
char
read_filename[MagickPathExtent];
Image
*image,
*images;
ImageInfo
*read_info;
/*
Read image list from a file.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
read_info=CloneImageInfo(image_info);
*read_info->magick='\0';
(void) SetImageOption(read_info,"filename",filename);
(void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
(void) InterpretImageFilename(read_info,(Image *) NULL,filename,
(int) read_info->scene,read_filename,exception);
if (LocaleCompare(read_filename,read_info->filename) != 0)
{
ExceptionInfo
*sans;
ssize_t
extent,
scene;
/*
Images of the form image-%d.png[1-5].
*/
sans=AcquireExceptionInfo();
(void) SetImageInfo(read_info,0,sans);
sans=DestroyExceptionInfo(sans);
if (read_info->number_scenes != 0)
{
(void) CopyMagickString(read_filename,read_info->filename,
MagickPathExtent);
images=NewImageList();
extent=(ssize_t) (read_info->scene+read_info->number_scenes);
scene=(ssize_t) read_info->scene;
for ( ; scene < (ssize_t) extent; scene++)
{
(void) InterpretImageFilename(image_info,(Image *) NULL,
read_filename,(int) scene,read_info->filename,exception);
image=ReadImage(read_info,exception);
if (image == (Image *) NULL)
continue;
AppendImageToList(&images,image);
}
read_info=DestroyImageInfo(read_info);
return(images);
}
}
(void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
image=ReadImage(read_info,exception);
read_info=DestroyImageInfo(read_info);
return(image);
}
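/*
  Usage sketch (illustrative, not part of the original source):

    ExceptionInfo *exception = AcquireExceptionInfo();
    ImageInfo *image_info = CloneImageInfo((ImageInfo *) NULL);
    Image *images = ReadImages(image_info, "frames-%02d.png[0-4]", exception);
    if (images == (Image *) NULL)
      CatchException(exception);
    else
      images = DestroyImageList(images);
    image_info = DestroyImageInfo(image_info);
    exception = DestroyExceptionInfo(exception);
*/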
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d I n l i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadInlineImage() reads a Base64-encoded inline image or image sequence.
% On failure (e.g. a memory shortage or an unreadable image), a NULL image
% is returned and exception describes the reason for the failure.
%
% The format of the ReadInlineImage method is:
%
% Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o content: the image encoded in Base64.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
const char *content,ExceptionInfo *exception)
{
Image
*image;
ImageInfo
*read_info;
unsigned char
*blob;
size_t
length;
const char
*p;
/*
Skip over header (e.g. data:image/gif;base64,).
*/
image=NewImageList();
for (p=content; (*p != ',') && (*p != '\0'); p++) ;
if (*p == '\0')
ThrowReaderException(CorruptImageError,"CorruptImage");
blob=Base64Decode(++p,&length);
if (length == 0)
{
blob=(unsigned char *) RelinquishMagickMemory(blob);
ThrowReaderException(CorruptImageError,"CorruptImage");
}
read_info=CloneImageInfo(image_info);
(void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
(void *) NULL);
*read_info->filename='\0';
*read_info->magick='\0';
for (p=content; (*p != '/') && (*p != '\0'); p++) ;
if (*p != '\0')
{
char
*q;
ssize_t
i;
/*
Extract media type.
*/
if (LocaleNCompare(++p,"x-",2) == 0)
p+=2;
(void) strcpy(read_info->filename,"data.");
q=read_info->filename+5;
for (i=0; (*p != ';') && (*p != '\0') && (i < (MagickPathExtent-6)); i++)
*q++=(*p++);
*q='\0';
}
image=BlobToImage(read_info,blob,length,exception);
blob=(unsigned char *) RelinquishMagickMemory(blob);
read_info=DestroyImageInfo(read_info);
return(image);
}
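/*
  Usage sketch (illustrative, not part of the original source): content is a
  data URI, and everything up to the first ',' is treated as the header:

    const char *content = "data:image/gif;base64,R0lGODlh...";
    Image *image = ReadInlineImage(image_info, content, exception);
*/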
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImage() writes an image or an image sequence to a file or file handle.
% If writing to a file on disk, the name is defined by the filename member
% of the image structure. WriteImage() returns MagickFalse if there is a
% memory shortage or if the image cannot be written. Check the exception
% member of image to determine the cause for any failure.
%
% The format of the WriteImage method is:
%
% MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
char
filename[MagickPathExtent];
const char
*option;
const DelegateInfo
*delegate_info;
const MagickInfo
*magick_info;
EncodeImageHandler
*encoder;
ExceptionInfo
*sans_exception;
ImageInfo
*write_info;
MagickBooleanType
status,
temporary;
/*
Determine image type from filename prefix or suffix (e.g. image.jpg).
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
sans_exception=AcquireExceptionInfo();
write_info=CloneImageInfo(image_info);
(void) CopyMagickString(write_info->filename,image->filename,
MagickPathExtent);
(void) SetImageInfo(write_info,1,sans_exception);
if (*write_info->magick == '\0')
(void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent);
(void) CopyMagickString(filename,image->filename,MagickPathExtent);
(void) CopyMagickString(image->filename,write_info->filename,
MagickPathExtent);
/*
Call appropriate image writer based on image type.
*/
magick_info=GetMagickInfo(write_info->magick,sans_exception);
if (sans_exception->severity == PolicyError)
magick_info=GetMagickInfo(write_info->magick,exception);
sans_exception=DestroyExceptionInfo(sans_exception);
if (magick_info != (const MagickInfo *) NULL)
{
if (GetMagickEndianSupport(magick_info) == MagickFalse)
image->endian=UndefinedEndian;
else
if ((image_info->endian == UndefinedEndian) &&
(GetMagickRawSupport(magick_info) != MagickFalse))
{
unsigned long
lsb_first;
lsb_first=1;
image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian;
}
}
(void) SyncImageProfiles(image);
DisassociateImageStream(image);
option=GetImageOption(image_info,"delegate:bimodal");
if ((IsStringTrue(option) != MagickFalse) &&
(write_info->page == (char *) NULL) &&
(GetPreviousImageInList(image) == (Image *) NULL) &&
(GetNextImageInList(image) == (Image *) NULL) &&
(IsTaintImage(image) == MagickFalse) )
{
delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception);
if ((delegate_info != (const DelegateInfo *) NULL) &&
(GetDelegateMode(delegate_info) == 0) &&
(IsPathAccessible(image->magick_filename) != MagickFalse))
{
/*
Process image with bi-modal delegate.
*/
(void) CopyMagickString(image->filename,image->magick_filename,
MagickPathExtent);
status=InvokeDelegate(write_info,image,image->magick,
write_info->magick,exception);
write_info=DestroyImageInfo(write_info);
(void) CopyMagickString(image->filename,filename,MagickPathExtent);
return(status);
}
}
status=MagickFalse;
temporary=MagickFalse;
if ((magick_info != (const MagickInfo *) NULL) &&
(GetMagickEncoderSeekableStream(magick_info) != MagickFalse))
{
char
image_filename[MagickPathExtent];
(void) CopyMagickString(image_filename,image->filename,MagickPathExtent);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
(void) CopyMagickString(image->filename, image_filename,MagickPathExtent);
if (status != MagickFalse)
{
if (IsBlobSeekable(image) == MagickFalse)
{
/*
A seekable stream is required by the encoder.
*/
write_info->adjoin=MagickTrue;
(void) CopyMagickString(write_info->filename,image->filename,
MagickPathExtent);
(void) AcquireUniqueFilename(image->filename);
temporary=MagickTrue;
}
(void) CloseBlob(image);
}
}
encoder=GetImageEncoder(magick_info);
if (encoder != (EncodeImageHandler *) NULL)
{
/*
Call appropriate image writer based on image type.
*/
if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
LockSemaphoreInfo(magick_info->semaphore);
status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception);
if (status != MagickFalse)
status=encoder(write_info,image,exception);
if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
UnlockSemaphoreInfo(magick_info->semaphore);
}
else
{
delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception);
if (delegate_info != (DelegateInfo *) NULL)
{
/*
Process the image with delegate.
*/
*write_info->filename='\0';
if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
LockSemaphoreInfo(delegate_info->semaphore);
status=InvokeDelegate(write_info,image,(char *) NULL,
write_info->magick,exception);
if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
UnlockSemaphoreInfo(delegate_info->semaphore);
(void) CopyMagickString(image->filename,filename,MagickPathExtent);
}
else
{
sans_exception=AcquireExceptionInfo();
magick_info=GetMagickInfo(write_info->magick,sans_exception);
if (sans_exception->severity == PolicyError)
magick_info=GetMagickInfo(write_info->magick,exception);
sans_exception=DestroyExceptionInfo(sans_exception);
if ((write_info->affirm == MagickFalse) &&
(magick_info == (const MagickInfo *) NULL))
{
(void) CopyMagickString(write_info->magick,image->magick,
MagickPathExtent);
magick_info=GetMagickInfo(write_info->magick,exception);
}
encoder=GetImageEncoder(magick_info);
if (encoder == (EncodeImageHandler *) NULL)
{
char
extension[MagickPathExtent];
GetPathComponent(image->filename,ExtensionPath,extension);
if (*extension != '\0')
magick_info=GetMagickInfo(extension,exception);
else
magick_info=GetMagickInfo(image->magick,exception);
(void) CopyMagickString(image->filename,filename,
MagickPathExtent);
encoder=GetImageEncoder(magick_info);
}
if (encoder == (EncodeImageHandler *) NULL)
{
magick_info=GetMagickInfo(image->magick,exception);
encoder=GetImageEncoder(magick_info);
if (encoder == (EncodeImageHandler *) NULL)
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateError,"NoEncodeDelegateForThisImageFormat",
"`%s'",write_info->magick);
}
if (encoder != (EncodeImageHandler *) NULL)
{
/*
Call appropriate image writer based on image type.
*/
if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
LockSemaphoreInfo(magick_info->semaphore);
status=IsCoderAuthorized(write_info->magick,WritePolicyRights,
exception);
if (status != MagickFalse)
status=encoder(write_info,image,exception);
if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
UnlockSemaphoreInfo(magick_info->semaphore);
}
}
}
if (temporary != MagickFalse)
{
/*
Copy temporary image file to permanent.
*/
status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception);
if (status != MagickFalse)
{
(void) RelinquishUniqueFileResource(write_info->filename);
status=ImageToFile(image,write_info->filename,exception);
}
(void) CloseBlob(image);
(void) RelinquishUniqueFileResource(image->filename);
(void) CopyMagickString(image->filename,write_info->filename,
MagickPathExtent);
}
if ((LocaleCompare(write_info->magick,"info") != 0) &&
(write_info->verbose != MagickFalse))
(void) IdentifyImage(image,stdout,MagickFalse,exception);
write_info=DestroyImageInfo(write_info);
if (GetBlobError(image) != MagickFalse)
ThrowWriterException(FileOpenError,"UnableToWriteFile");
return(status);
}
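/*
  Usage sketch (illustrative, not part of the original source): read an
  image, then write it in a format inferred from the new filename extension:

    Image *image = ReadImage(image_info, exception);
    if (image != (Image *) NULL)
      {
        (void) CopyMagickString(image->filename, "output.png",
          MagickPathExtent);
        (void) WriteImage(image_info, image, exception);
        image = DestroyImage(image);
      }
*/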
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImages() writes an image sequence into one or more files. While
% WriteImage() can write an image sequence, it is limited to writing
% the sequence into a single file using a format which supports multiple
% frames. WriteImages(), however, does not have this limitation; instead, it
% generates multiple output files if necessary (or when requested). When
% ImageInfo's adjoin flag is set to MagickFalse, the file name is expected
% to include a printf-style formatting string for the frame number (e.g.
% "image%02d.png").
%
% The format of the WriteImages method is:
%
% MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o images: the image list.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info,
Image *images,const char *filename,ExceptionInfo *exception)
{
#define WriteImageTag "Write/Image"
ExceptionInfo
*sans_exception;
ImageInfo
*write_info;
MagickBooleanType
proceed;
MagickOffsetType
progress;
MagickProgressMonitor
progress_monitor;
MagickSizeType
number_images;
MagickStatusType
status;
Image
*p;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
write_info=CloneImageInfo(image_info);
*write_info->magick='\0';
images=GetFirstImageInList(images);
if (filename != (const char *) NULL)
for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
(void) CopyMagickString(p->filename,filename,MagickPathExtent);
(void) CopyMagickString(write_info->filename,images->filename,
MagickPathExtent);
sans_exception=AcquireExceptionInfo();
(void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images),
sans_exception);
sans_exception=DestroyExceptionInfo(sans_exception);
if (*write_info->magick == '\0')
(void) CopyMagickString(write_info->magick,images->magick,MagickPathExtent);
p=images;
for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p))
{
Image
*next;
next=GetNextImageInList(p);
if (next == (Image *) NULL)
break;
if (p->scene >= next->scene)
{
ssize_t
i;
/*
Generate consistent scene numbers.
*/
i=(ssize_t) images->scene;
for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
p->scene=(size_t) i++;
break;
}
}
/*
Write images.
*/
status=MagickTrue;
progress_monitor=(MagickProgressMonitor) NULL;
progress=0;
number_images=GetImageListLength(images);
for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
{
if (number_images != 1)
progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL,
p->client_data);
status&=WriteImage(write_info,p,exception);
if (number_images != 1)
(void) SetImageProgressMonitor(p,progress_monitor,p->client_data);
if (write_info->adjoin != MagickFalse)
break;
if (number_images != 1)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(p,WriteImageTag,progress,number_images);
if (proceed == MagickFalse)
break;
}
}
write_info=DestroyImageInfo(write_info);
return(status != 0 ? MagickTrue : MagickFalse);
}
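/*
  Usage sketch (illustrative, not part of the original source): with adjoin
  set to MagickFalse, each frame is written to its own file using the
  printf-style filename pattern:

    write_info->adjoin = MagickFalse;
    (void) WriteImages(write_info, images, "frame-%02d.png", exception);
*/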
|
GB_add_phase0.c | //------------------------------------------------------------------------------
// GB_add_phase0: find vectors of C to compute for C=A+B or C<M>=A+B
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// The eWise add of two matrices, C=A+B, C<M>=A+B, or C<!M>=A+B starts with
// this phase, which determines which vectors of C need to be computed.
// This phase is also used for GB_masker.
// On input, A and B are the two matrices being added, and M is the optional
// mask matrix (not complemented). The complemented mask is handled in GB_mask,
// not here.
// The A matrix can be sparse, hypersparse, slice, or hyperslice. The B matrix
// can only be sparse or hypersparse. See GB_wait, which can pass in A as any
// of the four formats. In this case, no mask is present.
// On output, an integer (Cnvec), a boolean (Ch_is_Mh), and up to 4 arrays are
// returned, each either NULL or of size Cnvec. Let n = A->vdim be the vector
// dimension of A, B, M and C.
// Ch: the list of vectors to compute. If not NULL, Ch [k] = j is the
// kth vector in C to compute, which will become the hyperlist C->h of C.
// Note that some of these vectors may turn out to be empty, because of
// the mask, or because the vector j appeared in A or B, but is empty.
// It is pruned at the end of GB_add_phase2. If Ch is NULL then it is an
// implicit list of size n, and Ch [k] == k for all k = 0:n-1. In this
// case, C will be a standard matrix, not hypersparse. Thus, the kth
// vector is j = (Ch == NULL) ? k : Ch [k].
// Ch is freed by GB_add if phase1 fails. phase2 either frees it or
// transplants it into C.
// Ch_is_Mh: true if the mask M is present, hypersparse, and not
// complemented, false otherwise. In this case Ch is a deep copy of Mh.
// Only GB_add uses this option; it is not used by GB_masker (Ch_is_Mh
// is always false for GB_masker). This is determined by passing in
// p_Ch_is_Mh as a NULL or non-NULL pointer.
// C_to_A: if A is hypersparse, then C_to_A [k] = kA if the kth vector, j
// = (Ch == NULL) ? k : Ch [k] appears in A, as j = Ah [kA]. If j does
// not appear in A, then C_to_A [k] = -1. If A is not hypersparse, then
// C_to_A is returned as NULL.
// C_to_B: if B is hypersparse, then C_to_B [k] = kB if the kth vector, j
// = (Ch == NULL) ? k : Ch [k] appears in B, as j = Bh [kB]. If j does
// not appear in B, then C_to_B [k] = -1. If B is not hypersparse, then
// C_to_B is returned as NULL.
// C_to_M: if M is hypersparse, and Ch_is_Mh is false, then C_to_M [k] =
// kM if the kth vector, j = (Ch == NULL) ? k : Ch [k] appears in M, as j
// = Mh [kM]. If j does not appear in M, then C_to_M [k] = -1. If M is
// not hypersparse, then C_to_M is returned as NULL.
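// For example (an illustrative sketch, not from the original comment): if A
// is hypersparse with Ah = [0 2 5], B is hypersparse with Bh = [2 3], and no
// mask is present, then Ch is the set union [0 2 3 5] (Cnvec = 4), with
// C_to_A = [0 1 -1 2] and C_to_B = [-1 0 1 -1].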
#include "GB_add.h"
#define GB_FREE_WORK \
{ \
GB_FREE_MEMORY (kA_start, ntasks+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (kB_start, ntasks+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (kC_start, ntasks+1, sizeof (int64_t)) ; \
}
//------------------------------------------------------------------------------
// GB_allocate_result
//------------------------------------------------------------------------------
static inline bool GB_allocate_result
(
int64_t Cnvec,
int64_t *restrict *Ch_handle,
int64_t *restrict *C_to_M_handle,
int64_t *restrict *C_to_A_handle,
int64_t *restrict *C_to_B_handle
)
{
bool ok = true ;
if (Ch_handle != NULL)
{
GB_MALLOC_MEMORY (*Ch_handle, Cnvec, sizeof (int64_t)) ;
ok = (*Ch_handle != NULL) ;
}
if (C_to_M_handle != NULL)
{
GB_MALLOC_MEMORY (*C_to_M_handle, Cnvec, sizeof (int64_t)) ;
ok = ok && (*C_to_M_handle != NULL) ;
}
if (C_to_A_handle != NULL)
{
GB_MALLOC_MEMORY (*C_to_A_handle, Cnvec, sizeof (int64_t)) ;
ok = ok && (*C_to_A_handle != NULL) ;
}
if (C_to_B_handle != NULL)
{
GB_MALLOC_MEMORY (*C_to_B_handle, Cnvec, sizeof (int64_t)) ;
ok = ok && (*C_to_B_handle != NULL) ;
}
if (!ok)
{
// out of memory
if (Ch_handle != NULL)
{
GB_FREE_MEMORY (*Ch_handle, Cnvec, sizeof (int64_t)) ;
}
if (C_to_M_handle != NULL)
{
GB_FREE_MEMORY (*C_to_M_handle, Cnvec, sizeof (int64_t)) ;
}
if (C_to_A_handle != NULL)
{
GB_FREE_MEMORY (*C_to_A_handle, Cnvec, sizeof (int64_t)) ;
}
if (C_to_B_handle != NULL)
{
GB_FREE_MEMORY (*C_to_B_handle, Cnvec, sizeof (int64_t)) ;
}
}
return (ok) ;
}
//------------------------------------------------------------------------------
// GB_add_phase0: find the vectors of C for C<M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB_add_phase0 // find vectors in C for C=A+B or C<M>=A+B
(
int64_t *p_Cnvec, // # of vectors to compute in C
int64_t *restrict *Ch_handle, // Ch: size Cnvec, or NULL
int64_t *restrict *C_to_M_handle, // C_to_M: size Cnvec, or NULL
int64_t *restrict *C_to_A_handle, // C_to_A: size Cnvec, or NULL
int64_t *restrict *C_to_B_handle, // C_to_B: of size Cnvec, or NULL
bool *p_Ch_is_Mh, // if true, then Ch == Mh
const GrB_Matrix M, // optional mask, may be NULL; not complemented
const GrB_Matrix A, // standard, hypersparse, slice, or hyperslice
const GrB_Matrix B, // standard or hypersparse; never a slice
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (p_Cnvec != NULL) ;
ASSERT (Ch_handle != NULL) ;
ASSERT (C_to_A_handle != NULL) ;
ASSERT (C_to_B_handle != NULL) ;
ASSERT_OK (GB_check (A, "A for add phase0", GB0)) ;
ASSERT_OK (GB_check (B, "B for add phase0", GB0)) ;
ASSERT_OK_OR_NULL (GB_check (M, "M for add phase0", GB0)) ;
ASSERT (A->vdim == B->vdim) ;
ASSERT (GB_IMPLIES (M != NULL, A->vdim == M->vdim)) ;
//--------------------------------------------------------------------------
// initializations
//--------------------------------------------------------------------------
int64_t *restrict Ch = NULL ;
int64_t *restrict C_to_M = NULL ;
int64_t *restrict C_to_A = NULL ;
int64_t *restrict C_to_B = NULL ;
(*Ch_handle) = NULL ;
(*C_to_A_handle) = NULL ;
(*C_to_B_handle) = NULL ;
if (C_to_M_handle != NULL)
{
(*C_to_M_handle) = NULL ;
}
int64_t *restrict kA_start = NULL ;
int64_t *restrict kB_start = NULL ;
int64_t *restrict kC_start = NULL ;
int ntasks = 0 ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = 1 ; // nthreads depends on Cnvec, computed below
//--------------------------------------------------------------------------
// get content of M, A, and B
//--------------------------------------------------------------------------
int64_t Cnvec ;
int64_t n = A->vdim ;
int64_t Anvec = A->nvec ;
bool A_is_hyper = A->is_hyper ;
bool A_is_slice = A->is_slice ;
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = (A_is_hyper) ? A->h : NULL ;
const int64_t A_hfirst = A->hfirst ;
#define GB_Ah(k) (A_is_hyper ? Ah [k] : (A_hfirst + (k)))
int64_t Bnvec = B->nvec ;
const int64_t *restrict Bp = B->p ;
const int64_t *restrict Bh = B->h ;
bool B_is_hyper = B->is_hyper ;
ASSERT (!B->is_slice) ;
int64_t Mnvec = 0 ;
const int64_t *restrict Mp = NULL ;
const int64_t *restrict Mh = NULL ;
bool M_is_hyper = false ;
if (M != NULL)
{
Mnvec = M->nvec ;
Mp = M->p ;
Mh = M->h ;
M_is_hyper = M->is_hyper ;
ASSERT (!M->is_slice) ;
}
// For GB_add, if M is present, hypersparse, and not complemented, then C
// will be hypersparse, and it will have the same set of vectors as M (Ch == Mh).
// For GB_masker, Ch is never equal to Mh.
bool Ch_is_Mh = (p_Ch_is_Mh != NULL) && (M != NULL && M_is_hyper) ;
//--------------------------------------------------------------------------
// find the set union of the non-empty vectors of A and B
//--------------------------------------------------------------------------
if (Ch_is_Mh)
{
//----------------------------------------------------------------------
// C is hypersparse, with the same vectors as the hypersparse M
//----------------------------------------------------------------------
// This step is done for GB_add only, not GB_masker.
// GB_wait is the only place where A may be a slice, and it does not
// use a mask. So this phase can ignore the case where A is a slice.
Cnvec = Mnvec ;
nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
ASSERT (!A_is_slice) ;
if (!GB_allocate_result (Cnvec, &Ch, NULL,
(A_is_hyper) ? (&C_to_A) : NULL, (B_is_hyper) ? (&C_to_B) : NULL))
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
// copy Mh into Ch. Ch is Mh so C_to_M is not needed.
GB_memcpy (Ch, Mh, Mnvec * sizeof (int64_t), nthreads) ;
// construct the mapping from C to A and B, if they are hypersparse
if (A_is_hyper || B_is_hyper)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < Cnvec ; k++)
{
int64_t j = Ch [k] ;
if (A_is_hyper)
{
// C_to_A [k] = kA if Ah [kA] == j and A(:,j) is non-empty
int64_t kA = 0, pA, pA_end ;
GB_lookup (true, Ah, Ap, &kA, Anvec-1, j, &pA, &pA_end) ;
C_to_A [k] = (pA < pA_end) ? kA : -1 ;
}
if (B_is_hyper)
{
// C_to_B [k] = kB if Bh [kB] == j and B(:,j) is non-empty
int64_t kB = 0, pB, pB_end ;
GB_lookup (true, Bh, Bp, &kB, Bnvec-1, j, &pB, &pB_end) ;
C_to_B [k] = (pB < pB_end) ? kB : -1 ;
}
}
}
}
else if ((A_is_hyper || A_is_slice) && B_is_hyper)
{
//----------------------------------------------------------------------
// A is hypersparse or a hyperslice, and B is hypersparse
//----------------------------------------------------------------------
// Ch is the set union of Ah and Bh. This is handled with a parallel
// merge, since Ah and Bh are both sorted lists.
//----------------------------------------------------------------------
// phase 0: create the tasks
//----------------------------------------------------------------------
double work = GB_IMIN (Anvec + Bnvec, n) ;
nthreads = GB_nthreads (work, chunk, nthreads_max) ;
ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;
ntasks = GB_IMIN (ntasks, work) ;
// allocate workspace
GB_MALLOC_MEMORY (kA_start, ntasks+1, sizeof (int64_t)) ;
GB_MALLOC_MEMORY (kB_start, ntasks+1, sizeof (int64_t)) ;
GB_MALLOC_MEMORY (kC_start, ntasks+1, sizeof (int64_t)) ;
if (kA_start == NULL || kB_start == NULL || kC_start == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
kA_start [0] = (Anvec == 0) ? -1 : 0 ;
kB_start [0] = (Bnvec == 0) ? -1 : 0 ;
kA_start [ntasks] = (Anvec == 0) ? -1 : Anvec ;
kB_start [ntasks] = (Bnvec == 0) ? -1 : Bnvec ;
for (int taskid = 1 ; taskid < ntasks ; taskid++)
{
// create tasks: A and B are both hyper
double target_work = ((ntasks-taskid) * work) / ntasks ;
GB_slice_vector (NULL, NULL,
&(kA_start [taskid]), &(kB_start [taskid]),
0, 0, NULL, // Mi not present
0, Anvec, Ah, A_hfirst, // Ah, explicit or implicit list
0, Bnvec, Bh, // Bh, explicit list
n, // Ah and Bh have dimension n
target_work) ;
}
//----------------------------------------------------------------------
// phase 1: count the entries in the result of each task
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule (dynamic,1)
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
// merge Ah and Bh into Ch
int64_t kA = kA_start [taskid] ;
int64_t kB = kB_start [taskid] ;
int64_t kA_end = kA_start [taskid+1] ;
int64_t kB_end = kB_start [taskid+1] ;
int64_t kC = 0 ;
for ( ; kA < kA_end && kB < kB_end ; kC++)
{
int64_t jA = GB_Ah (kA) ;
int64_t jB = Bh [kB] ;
if (jA < jB)
{
// jA appears in A but not B
kA++ ;
}
else if (jB < jA)
{
// jB appears in B but not A
kB++ ;
}
else
{
// j = jA = jB appears in both A and B
kA++ ;
kB++ ;
}
}
kC_start [taskid] = kC + (kA_end - kA) + (kB_end - kB) ;
}
//----------------------------------------------------------------------
// phase 1b: cumulative sum of entries for each task
//----------------------------------------------------------------------
GB_cumsum (kC_start, ntasks, NULL, 1) ;
Cnvec = kC_start [ntasks] ;
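// For example (illustrative values): with ntasks = 3 and per-task counts
// [3 2 4], the cumulative sum gives kC_start = [0 3 5 9], so task t fills
// Ch [kC_start [t] ... kC_start [t+1]-1] and Cnvec = 9.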
//----------------------------------------------------------------------
// allocate the result
//----------------------------------------------------------------------
// C will be hypersparse, so Ch is allocated. The mask M is ignored
// for computing Ch. Ch is the set union of Ah and Bh.
if (!GB_allocate_result (Cnvec, &Ch,
(M_is_hyper) ? (&C_to_M) : NULL, &C_to_A, &C_to_B))
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// phase 2: compute the result
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule (dynamic,1)
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
// merge Ah and Bh into Ch
int64_t kA = kA_start [taskid] ;
int64_t kB = kB_start [taskid] ;
int64_t kC = kC_start [taskid] ;
int64_t kA_end = kA_start [taskid+1] ;
int64_t kB_end = kB_start [taskid+1] ;
for ( ; kA < kA_end && kB < kB_end ; kC++)
{
int64_t jA = GB_Ah (kA) ;
int64_t jB = Bh [kB] ;
if (jA < jB)
{
// append jA to Ch
Ch [kC] = jA ;
C_to_A [kC] = kA++ ;
C_to_B [kC] = -1 ; // jA does not appear in B
}
else if (jB < jA)
{
// append jB to Ch
Ch [kC] = jB ;
C_to_A [kC] = -1 ; // jB does not appear in A
C_to_B [kC] = kB++ ;
}
else
{
// j appears in both A and B; append it to Ch
Ch [kC] = jA ;
C_to_A [kC] = kA++ ;
C_to_B [kC] = kB++ ;
}
}
if (kA < kA_end)
{
// B is exhausted but A is not
for ( ; kA < kA_end ; kA++, kC++)
{
// append jA to Ch
int64_t jA = GB_Ah (kA) ;
Ch [kC] = jA ;
C_to_A [kC] = kA ;
C_to_B [kC] = -1 ;
}
}
else if (kB < kB_end)
{
// A is exhausted but B is not
for ( ; kB < kB_end ; kB++, kC++)
{
// append jB to Ch
int64_t jB = Bh [kB] ;
Ch [kC] = jB ;
C_to_A [kC] = -1 ;
C_to_B [kC] = kB ;
}
}
ASSERT (kC == kC_start [taskid+1]) ;
}
//----------------------------------------------------------------------
// check result via a sequential merge
//----------------------------------------------------------------------
#ifdef GB_DEBUG
// merge Ah and Bh into Ch
int64_t kA = 0 ;
int64_t kB = 0 ;
int64_t kC = 0 ;
for ( ; kA < Anvec && kB < Bnvec ; kC++)
{
int64_t jA = GB_Ah (kA) ;
int64_t jB = Bh [kB] ;
if (jA < jB)
{
// append jA to Ch
ASSERT (Ch [kC] == jA) ;
ASSERT (C_to_A [kC] == kA) ; kA++ ;
ASSERT (C_to_B [kC] == -1) ; // jA does not appear in B
}
else if (jB < jA)
{
// append jB to Ch
ASSERT (Ch [kC] == jB) ;
ASSERT (C_to_A [kC] == -1) ; // jB does not appear in A
ASSERT (C_to_B [kC] == kB) ; kB++ ;
}
else
{
// j appears in both A and B; append it to Ch
ASSERT (Ch [kC] == jA) ;
ASSERT (C_to_A [kC] == kA) ; kA++ ;
ASSERT (C_to_B [kC] == kB) ; kB++ ;
}
}
if (kA < Anvec)
{
// B is exhausted but A is not
for ( ; kA < Anvec ; kA++, kC++)
{
// append jA to Ch
int64_t jA = GB_Ah (kA) ;
ASSERT (Ch [kC] == jA) ;
ASSERT (C_to_A [kC] == kA) ;
ASSERT (C_to_B [kC] == -1) ;
}
}
else if (kB < Bnvec)
{
// A is exhausted but B is not
for ( ; kB < Bnvec ; kB++, kC++)
{
// append jB to Ch
int64_t jB = Bh [kB] ;
ASSERT (Ch [kC] == jB) ;
ASSERT (C_to_A [kC] == -1) ;
ASSERT (C_to_B [kC] == kB) ;
}
}
ASSERT (kC == Cnvec) ;
#endif
}
else if ((A_is_hyper || A_is_slice) && !B_is_hyper)
{
//----------------------------------------------------------------------
// A is hypersparse, B is standard
//----------------------------------------------------------------------
// C will be standard. Construct the C_to_A mapping.
Cnvec = n ;
nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
if (!GB_allocate_result (Cnvec, NULL,
(M_is_hyper) ? (&C_to_M) : NULL, &C_to_A, NULL))
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t j = 0 ; j < n ; j++)
{
C_to_A [j] = -1 ;
}
// scatter Ah into C_to_A
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t kA = 0 ; kA < Anvec ; kA++)
{
int64_t jA = GB_Ah (kA) ;
C_to_A [jA] = kA ;
}
}
else if (!(A_is_hyper || A_is_slice) && B_is_hyper)
{
//----------------------------------------------------------------------
// A is standard, B is hypersparse
//----------------------------------------------------------------------
// C will be standard. Construct the C_to_B mapping.
Cnvec = n ;
nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
if (!GB_allocate_result (Cnvec, NULL,
(M_is_hyper) ? (&C_to_M) : NULL, NULL, &C_to_B))
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t j = 0 ; j < n ; j++)
{
C_to_B [j] = -1 ;
}
// scatter Bh into C_to_B
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t kB = 0 ; kB < Bnvec ; kB++)
{
int64_t jB = Bh [kB] ;
C_to_B [jB] = kB ;
}
}
else
{
//----------------------------------------------------------------------
// A and B are both standard
//----------------------------------------------------------------------
// C will be standard
Cnvec = n ;
nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
if (!GB_allocate_result (Cnvec, NULL,
(M_is_hyper) ? (&C_to_M) : NULL, NULL, NULL))
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
}
//--------------------------------------------------------------------------
// construct C_to_M if needed
//--------------------------------------------------------------------------
if (C_to_M != NULL)
{
if (Ch != NULL)
{
// C is hypersparse
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < Cnvec ; k++)
{
int64_t j = Ch [k] ;
// C_to_M [k] = kM if Mh [kM] == j and M(:,j) is non-empty
int64_t kM = 0, pM, pM_end ;
GB_lookup (true, Mh, Mp, &kM, Mnvec-1, j, &pM, &pM_end) ;
C_to_M [k] = (pM < pM_end) ? kM : -1 ;
}
}
else
{
// C is standard
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t j = 0 ; j < n ; j++)
{
C_to_M [j] = -1 ;
}
// scatter Mh into C_to_M
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t kM = 0 ; kM < Mnvec ; kM++)
{
int64_t jM = Mh [kM] ;
C_to_M [jM] = kM ;
}
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
(*p_Cnvec ) = Cnvec ;
if (p_Ch_is_Mh != NULL)
{
// return Ch_is_Mh to GB_add. For GB_masker, Ch is never Mh.
(*p_Ch_is_Mh) = Ch_is_Mh ;
}
(*Ch_handle ) = Ch ;
(*C_to_A_handle) = C_to_A ;
(*C_to_B_handle) = C_to_B ;
if (C_to_M_handle != NULL)
{
(*C_to_M_handle) = C_to_M ;
}
//--------------------------------------------------------------------------
// The code below describes what the output contains:
//--------------------------------------------------------------------------
#ifdef GB_DEBUG
ASSERT (A != NULL) ; // A and B are always present
ASSERT (B != NULL) ;
int64_t jlast = -1 ;
for (int64_t k = 0 ; k < Cnvec ; k++)
{
// C(:,j) is in the list, as the kth vector
int64_t j ;
if (Ch == NULL)
{
// C will be constructed as standard sparse
j = k ;
}
else
{
// C will be constructed as hypersparse
j = Ch [k] ;
}
// vectors j in Ch are sorted, and in the range 0:n-1
ASSERT (j >= 0 && j < n) ;
ASSERT (j > jlast) ;
jlast = j ;
// see if A (:,j) exists
if (C_to_A != NULL)
{
// A is hypersparse, or a slice
ASSERT (A->is_hyper || A->is_slice) ;
int64_t kA = C_to_A [k] ;
ASSERT (kA >= -1 && kA < A->nvec) ;
if (kA >= 0)
{
int64_t jA = GB_Ah (kA) ;
ASSERT (j == jA) ;
}
}
else
{
// A is in standard sparse form
// C_to_A exists only if A is hypersparse
ASSERT (!(A->is_hyper || A->is_slice)) ;
}
// see if B (:,j) exists
if (C_to_B != NULL)
{
// B is hypersparse
ASSERT (B->is_hyper) ;
int64_t kB = C_to_B [k] ;
ASSERT (kB >= -1 && kB < B->nvec) ;
if (kB >= 0)
{
int64_t jB = B->h [kB] ;
ASSERT (j == jB) ;
}
}
else
{
// B is in standard sparse form
// C_to_B exists only if B is hypersparse
ASSERT (!B->is_hyper) ;
}
// see if M (:,j) exists
if (Ch_is_Mh)
{
// Ch is the same as Mh
ASSERT (M != NULL) ;
ASSERT (M->is_hyper) ;
ASSERT (Ch != NULL && M->h != NULL && Ch [k] == M->h [k]) ;
ASSERT (C_to_M == NULL) ;
}
else if (C_to_M != NULL)
{
// M is present and hypersparse
ASSERT (M != NULL) ;
ASSERT (M->is_hyper) ;
int64_t kM = C_to_M [k] ;
ASSERT (kM >= -1 && kM < M->nvec) ;
if (kM >= 0)
{
int64_t jM = M->h [kM] ;
ASSERT (j == jM) ;
}
}
else
{
// M is not present, or in standard form
ASSERT (M == NULL || !(M->is_hyper)) ;
}
}
#endif
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
}
|
ten_tusscher_2004_epi_S3_4.c | //Original ten Tusscher et al. (2004) epicardial cell model
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S3_4.h"
GET_CELL_MODEL_DATA(init_cell_model_data) {
assert(cell_model);
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.7222555857775,0.00124832587159023,0.783462712179595,0.783274260125311,0.000170985504910724,0.486692890449150,0.00290823499179711,0.999998398101156,1.88451728840305e-08,1.85263533592764e-05,0.999776650901527,1.00720674152297,0.999996768057829,4.25428531899888e-05,0.238365528186353,10.2732995493148,139.602683230087};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
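// Descriptive note (not in the original source): despite the "rDY" naming,
// RHS_cpu() returns updated state values rather than derivatives (gates via
// the Rush-Larsen scheme, voltage via an explicit Euler step folded into
// rDY_[0]), so solve_model_ode_cpu() advances one step by copying rDY into sv.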
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
assert(sv);
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
/// real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
//#ifdef EPI
real Gto=0.294;
//#endif
// #ifdef ENDO
// real Gto=0.073;
//#endif
//#ifdef MCELL
// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={14.6680147002871,0.000430530939075394,0.000127852575589828,0.000356808091092550,0.263201677596404,0.118395256371924,0.182181143961952,5.07547129145478,0.0155941993462387,1.84656256314423,1088.39960685796,0.000350028084311740,0.555667945632962,0.00905447310372078,0.00477222574684051,6.97994762709882e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
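// The gate updates above use the Rush-Larsen exponential scheme: each gate
// relaxes toward its steady state *_INF with time constant TAU_*, which is
// exact over one step for the locally linear ODE dy/dt = (y_inf - y)/tau.
// A minimal stand-alone sketch of that update (hypothetical helper, not part
// of the original model code):
//
// static inline float rush_larsen(float y, float y_inf, float tau, float dt) {
//     return y_inf - (y_inf - y) * expf(-dt / tau);
// }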
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
GB_binop__times_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__times_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__times_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__times_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_int64)
// A*D function (colscale): GB (_AxD__times_int64)
// D*A function (rowscale): GB (_DxB__times_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__times_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__times_int64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_int64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_int64)
// C=scalar+B GB (_bind1st__times_int64)
// C=scalar+B' GB (_bind1st_tran__times_int64)
// C=A+scalar GB (_bind2nd__times_int64)
// C=A'+scalar GB (_bind2nd_tran__times_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x * y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_INT64 || GxB_NO_TIMES_INT64)
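// For reference, a generic template line such as
//     GB_GETA (aij, Ax, pA, A_iso) ;
//     GB_GETB (bij, Bx, pB, B_iso) ;
//     GB_BINOP (GB_CX (p), aij, bij, i, j) ;
// expands, with the macros above, to (roughly):
//     int64_t aij = GBX (Ax, pA, A_iso) ;
//     int64_t bij = GBX (Bx, pB, B_iso) ;
//     Cx [p] = (aij * bij) ;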
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__times_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__times_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__times_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__times_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__times_int64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__times_int64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__times_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__times_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__times_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__times_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__times_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__times_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x * bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
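// Caller-side sketch (an assumption for illustration: the bind-first apply of
// the GraphBLAS C API v1.3, which can dispatch to GB (_bind1st__times_int64)
// when this kernel is enabled):
//     GrB_Matrix_apply_BinaryOp1st_INT64 (C, NULL, NULL, GrB_TIMES_INT64,
//         (int64_t) 42, A, NULL) ;   // C = 42 .* A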
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__times_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij * y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x * aij) ; \
}
GrB_Info GB (_bind1st_tran__times_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij * y) ; \
}
GrB_Info GB (_bind2nd_tran__times_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ChMatrix.h | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: Alessandro Tasora, Radu Serban
// =============================================================================
#ifndef CHMATRIX_H
#define CHMATRIX_H
#include <immintrin.h>
#include "chrono/core/ChCoordsys.h"
#include "chrono/core/ChException.h"
#include "chrono/ChConfig.h"
#include "chrono/serialization/ChArchive.h"
#include "chrono/serialization/ChArchiveAsciiDump.h"
namespace chrono {
//
// FAST MACROS TO SPEEDUP CODE
//
#define Set33Element(a, b, val) SetElementN(((a * 3) + (b)), val)
#define Get33Element(a, b) GetElementN((a * 3) + (b))
#define Set34Element(a, b, val) SetElementN(((a * 4) + (b)), val)
#define Get34Element(a, b) GetElementN((a * 4) + (b))
#define Set34Row(ma, a, val0, val1, val2, val3) \
ma.SetElementN((a * 4), val0); \
ma.SetElementN((a * 4) + 1, val1); \
ma.SetElementN((a * 4) + 2, val2); \
ma.SetElementN((a * 4) + 3, val3);
#define Set44Element(a, b, val) SetElementN(((a * 4) + (b)), val)
#define Get44Element(a, b) GetElementN((a * 4) + (b))
// forward declaration
template <class Real = double>
class ChMatrixDynamic;
///
/// ChMatrix:
///
/// A base class for matrix objects (tables of NxM numbers).
/// To access elements, the indexes start from zero, and
/// you must indicate first row, then column, that is: m(2,4)
/// means the element at 3rd row, 5th column.
/// This is an abstract class, so you cannot instantiate
/// objects from it: you must rather create matrices using the
/// specialized child classes like ChMatrixDynamic, ChMatrixNM
/// ChMatrix33 and so on; all of them have this same base class.
/// Warning: for optimization reasons, not all functions
/// check the bounds of element indexes and matrix sizes (in
/// some cases, if sizes are wrong, debug asserts are used).
///
/// Further info at the @ref mathematical_objects manual page.
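///
/// Example (a minimal usage sketch with the ChMatrixDynamic child class):
///
///     ChMatrixDynamic<double> m(3, 4);  // 3 rows, 4 columns
///     m(2, 1) = 10;                     // element at 3rd row, 2nd column
///     double v = m.GetElement(2, 1);    // read the same element back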
template <class Real = double>
class ChMatrix {
protected:
//
// DATA
//
int rows;
int columns;
Real* address;
public:
//
// CONSTRUCTORS (none - abstract class that must be implemented with child classes)
//
virtual ~ChMatrix() {}
//
// OPERATORS OVERLOADING
//
/// Parenthesis () operator, to access a single element of the matrix, by
/// supplying the row and the column (indexes start from 0).
/// For example: m(3,5) gets the element at the 4th row, 6th column.
/// Value is returned by reference, so it can be modified, like in m(1,2)=10.
Real& operator()(const int row, const int col) {
assert(row >= 0 && col >= 0 && row < rows && col < columns);
return (*(address + col + (row * columns)));
}
const Real& operator()(const int row, const int col) const {
assert(row >= 0 && col >= 0 && row < rows && col < columns);
return (*(address + col + (row * columns)));
}
/// Parenthesis () operator, to access a single element of the matrix, by
/// supplying the ordinal of the element (indexes start from 0).
/// For example: m(3) gets the 4th element, counting row by row.
/// Mostly useful if the matrix is Nx1 sized (i.e. a N-element vector).
/// Value is returned by reference, so it can be modified, like in m(1,2)=10.
Real& operator()(const int el) {
assert(el >= 0 && el < rows * columns);
return (*(address + el));
}
const Real& operator()(const int el) const {
assert(el >= 0 && el < rows * columns);
return (*(address + el));
}
/// The [] operator returns the address of the n-th row. This is mostly
/// for compatibility with old matrix programming styles (2d array-like)
/// where to access an element at row i, column j, one can write mymatrix[i][j].
Real* operator[](const int row) {
assert(row >= 0 && row < rows);
return ((address + (row * columns)));
}
const Real* operator[](const int row) const {
assert(row >= 0 && row < rows);
return ((address + (row * columns)));
}
/// Multiplies this matrix by a factor, in place
ChMatrix<Real>& operator*=(const Real factor) {
MatrScale(factor);
return *this;
}
/// Increments this matrix by another matrix, in place
template <class RealB>
ChMatrix<Real>& operator+=(const ChMatrix<RealB>& matbis) {
MatrInc(matbis);
return *this;
}
/// Decrements this matrix by another matrix, in place
template <class RealB>
ChMatrix<Real>& operator-=(const ChMatrix<RealB>& matbis) {
MatrDec(matbis);
return *this;
}
/// Matrices are equal?
bool operator==(const ChMatrix<Real>& other) { return Equals(other); }
/// Matrices are not equal?
bool operator!=(const ChMatrix<Real>& other) { return !Equals(other); }
/// Assignment operator
ChMatrix<Real>& operator=(const ChMatrix<Real>& matbis) {
if (&matbis != this)
CopyFromMatrix(matbis);
return *this;
}
template <class RealB>
ChMatrix<Real>& operator=(const ChMatrix<RealB>& matbis) {
CopyFromMatrix(matbis);
return *this;
}
//
// FUNCTIONS
//
/// Sets the element at row,col position. Indexes start with zero.
void SetElement(int row, int col, Real elem) {
assert(row >= 0 && col >= 0 && row < rows && col < columns); // boundary checks
*(address + col + (row * columns)) = elem;
}
/// Gets the element at row,col position. Indexes start with zero.
/// The return value is a copy of original value. Use Element() instead if you
/// want to access directly by reference the original element.
Real GetElement(int row, int col) {
assert(row >= 0 && col >= 0 && row < rows && col < columns); // boundary checks
return (*(address + col + (row * columns)));
}
Real GetElement(int row, int col) const {
assert(row >= 0 && col >= 0 && row < rows && col < columns); // boundary checks
return (*(address + col + (row * columns)));
}
/// Sets the Nth element, counting row after row.
void SetElementN(int index, Real elem) {
assert(index >= 0 && index < (rows * columns)); // boundary checks
*(address + index) = elem;
}
/// Gets the Nth element, counting row after row.
Real GetElementN(int index) {
assert(index >= 0 && index < (rows * columns));
return (*(address + index));
}
const Real GetElementN(int index) const {
assert(index >= 0 && index < (rows * columns));
return (*(address + index));
}
/// Access a single element of the matrix, by
/// supplying the row and the column (indexes start from 0).
/// Value is returned by reference, so it can be modified, like in m.Element(1,2)=10.
Real& Element(int row, int col) {
assert(row >= 0 && col >= 0 && row < rows && col < columns);
return (*(address + col + (row * columns)));
}
const Real& Element(int row, int col) const {
assert(row >= 0 && col >= 0 && row < rows && col < columns);
return (*(address + col + (row * columns)));
}
/// Access a single element of the matrix, the Nth element, counting row after row.
/// Value is returned by reference, so it can be modified, like in m.Element(5)=10.
Real& ElementN(int index) {
assert(index >= 0 && index < (rows * columns));
return (*(address + index));
}
const Real& ElementN(int index) const {
assert(index >= 0 && index < (rows * columns));
return (*(address + index));
}
/// Access directly the "Real* address" buffer. Warning! this is a low level
/// function, it should be used in rare cases, if really needed!
Real* GetAddress() { return address; }
const Real* GetAddress() const { return address; }
/// Gets the number of rows
int GetRows() const { return rows; }
/// Gets the number of columns
int GetColumns() const { return columns; }
/// Reallocate memory for a new size. VIRTUAL! Must be implemented by child classes!
virtual void Resize(int nrows, int ncols) {}
/// Swaps the columns a and b
void SwapColumns(int a, int b) {
Real temp;
for (int i = 0; i < rows; i++) {
temp = GetElement(i, a);
SetElement(i, a, GetElement(i, b));
SetElement(i, b, temp);
}
}
/// Swap the rows a and b
void SwapRows(int a, int b) {
Real temp;
for (int i = 0; i < columns; i++) {
temp = GetElement(a, i);
SetElement(a, i, GetElement(b, i));
SetElement(b, i, temp);
}
}
/// Fill the diagonal elements, given a sample.
/// Note that the matrix must already be square (no check for
/// rectangular matrices!), and the off-diagonal elements are
/// not modified (this function does not set them to 0).
void FillDiag(Real sample) {
for (int i = 0; i < rows; ++i)
SetElement(i, i, sample);
}
/// Fill the matrix with the same value in all elements
void FillElem(Real sample) {
for (int i = 0; i < rows * columns; ++i)
SetElementN(i, sample);
}
/// Fill the matrix with random float numbers, falling within the
/// "max"/"min" range.
void FillRandom(Real max, Real min) {
for (int i = 0; i < rows * columns; ++i)
SetElementN(i, min + (Real)ChRandom() * (max - min));
}
/// Resets all the matrix elements to zero.
void Reset() {
// SetZero(rows*columns); //memset(address, 0, sizeof(Real) * rows * columns);
for (int i = 0; i < rows * columns; ++i)
this->address[i] = 0;
}
/// Resets to zero and (if needed) changes the size to nrows x ncols
void Reset(int nrows, int ncols) {
Resize(nrows, ncols);
// SetZero(rows*columns); //memset(address, 0, sizeof(Real) * rows * columns);
for (int i = 0; i < rows * columns; ++i)
this->address[i] = 0;
}
/// Reset to identity matrix (ones on diagonal, zero elsewhere)
void SetIdentity() {
Reset();
FillDiag(1.0);
}
/// Copy a matrix "matra" into this matrix. Note that
/// the destination matrix will be resized if necessary.
template <class RealB>
void CopyFromMatrix(const ChMatrix<RealB>& matra) {
Resize(matra.GetRows(), matra.GetColumns());
// ElementsCopy(address, matra.GetAddress(), rows*columns);
// memcpy (address, matra.address, (sizeof(Real) * rows * columns));
for (int i = 0; i < rows * columns; ++i)
address[i] = (Real)matra.GetAddress()[i];
}
/// Copy the transpose of matrix "matra" into this matrix. Note that
/// the destination matrix will be resized if necessary.
template <class RealB>
void CopyFromMatrixT(const ChMatrix<RealB>& matra) {
Resize(matra.GetColumns(), matra.GetRows());
for (int i = 0; i < matra.GetRows(); ++i)
for (int j = 0; j < matra.GetColumns(); ++j)
SetElement(j, i, (Real)matra.Element(i, j));
}
/// Copy the transposed upper triangular part of "matra" in the lower triangular
/// part of this matrix. (matra must be square)
/// Note that the destination matrix will be resized if necessary.
template <class RealB> // _______ //
void CopyTUpMatrix(const ChMatrix<RealB>& matra) // \ | |\ //
{ // \ A'| ---> | \ //
Resize(matra.GetRows(), matra.GetColumns()); // \ | |this\ //
for (int i = 0; i < matra.GetRows(); i++) { // \| |______\ //
for (int j = 0; j < matra.GetRows(); j++)
SetElement(j, i, (Real)matra.GetElement(i, j));
}
}
/// Copy the transposed lower triangular part of "matra" in the upper triangular
/// part of this matrix. (matra must be square)
/// Note that the destination matrix will be resized if necessary.
template <class RealB> // _______ //
void CopyTLwMatrix(const ChMatrix<RealB>& matra) // |\ \ | //
{ // | \ ---> \this| //
Resize(matra.GetRows(), matra.GetColumns()); // |A' \ \ | //
for (int i = 0; i < matra.GetRows(); i++) { // |______\ \| //
for (int j = 0; j < matra.GetRows(); j++)
SetElement(i, j, (Real)matra.GetElement(j, i));
}
}
//
// STREAMING
//
/// Method to allow serialization of transient data in archives.
virtual void ArchiveOUT(ChArchiveOut& marchive) {
// suggested: use versioning
marchive.VersionWrite(1);
// stream out all member data
marchive << make_ChNameValue("rows", rows);
marchive << make_ChNameValue("columns", columns);
// custom output of matrix data as array
if (ChArchiveAsciiDump* mascii = dynamic_cast<ChArchiveAsciiDump*>(&marchive)) {
// CUSTOM row x col 'intuitive' table-like log when using ChArchiveAsciiDump:
for (int i = 0; i < rows; i++) {
mascii->indent();
for (int j = 0; j < columns; j++) {
mascii->GetStream()->operator<<(Element(i, j));
mascii->GetStream()->operator<<(", ");
}
mascii->GetStream()->operator<<("\n");
}
} else {
// NORMAL array-based serialization:
int tot_elements = GetRows() * GetColumns();
marchive.out_array_pre("data", tot_elements, typeid(Real).name());
for (int i = 0; i < tot_elements; i++) {
marchive << CHNVP(ElementN(i), "");
marchive.out_array_between(tot_elements, typeid(Real).name());
}
marchive.out_array_end(tot_elements, typeid(Real).name());
}
}
/// Method to allow deserialization of transient data from archives.
virtual void ArchiveIN(ChArchiveIn& marchive) {
// suggested: use versioning
int version = marchive.VersionRead();
// stream in all member data
int m_row, m_col;
marchive >> make_ChNameValue("rows", m_row);
marchive >> make_ChNameValue("columns", m_col);
Reset(m_row, m_col);
// custom input of matrix data as array
int tot_elements = GetRows() * GetColumns();
marchive.in_array_pre("data", tot_elements);
for (int i = 0; i < tot_elements; i++) {
marchive >> CHNVP(ElementN(i));
marchive.in_array_between("data");
}
marchive.in_array_end("data");
}
/// Method to allow serializing transient data into ascii
/// as a readable item, for example "chrono::GetLog() << myobject;"
/// ***OBSOLETE***
void StreamOUT(ChStreamOutAscii& mstream) {
mstream << "\n"
<< "Matrix " << GetRows() << " rows, " << GetColumns() << " columns."
<< "\n";
for (int i = 0; i < ChMin(GetRows(), 8); i++) {
for (int j = 0; j < ChMin(GetColumns(), 8); j++)
mstream << GetElement(i, j) << " ";
if (GetColumns() > 8)
mstream << "...";
mstream << "\n";
}
if (GetRows() > 8)
mstream << "... \n\n";
}
/// Method to allow serializing transient data into an ascii stream (ex. a file)
/// as a Matlab .dat file (all numbers in a row, separated by space, then CR)
void StreamOUTdenseMatlabFormat(ChStreamOutAscii& mstream) {
for (int ii = 0; ii < this->GetRows(); ii++) {
for (int jj = 0; jj < this->GetColumns(); jj++) {
mstream << this->GetElement(ii, jj);
if (jj < (this->GetColumns() - 1))
mstream << " ";
}
mstream << "\n";
}
}
//
// MATH MEMBER FUNCTIONS.
// For speed reasons, sometimes size checking of operands is left to the user!
//
/// Changes the sign of all the elements of this matrix, in place.
void MatrNeg() {
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) = -ElementN(nel);
}
/// Sum two matrices, and stores the result in "this" matrix: [this]=[A]+[B].
template <class RealB, class RealC>
void MatrAdd(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
assert(matra.GetColumns() == matrb.GetColumns() && matra.rows == matrb.GetRows());
assert(this->columns == matrb.GetColumns() && this->rows == matrb.GetRows());
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) = (Real)(matra.ElementN(nel) + matrb.ElementN(nel));
}
/// Subtract two matrices, and stores the result in "this" matrix: [this]=[A]-[B].
template <class RealB, class RealC>
void MatrSub(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
assert(matra.GetColumns() == matrb.GetColumns() && matra.rows == matrb.GetRows());
assert(this->columns == matrb.GetColumns() && this->rows == matrb.GetRows());
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) = (Real)(matra.ElementN(nel) - matrb.ElementN(nel));
}
/// Increments this matrix with another matrix A, as: [this]+=[A]
template <class RealB>
void MatrInc(const ChMatrix<RealB>& matra) {
assert(matra.GetColumns() == columns && matra.GetRows() == rows);
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) += (Real)matra.ElementN(nel);
}
/// Decrements this matrix with another matrix A, as: [this]-=[A]
template <class RealB>
void MatrDec(const ChMatrix<RealB>& matra) {
assert(matra.GetColumns() == columns && matra.GetRows() == rows);
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) -= (Real)matra.ElementN(nel);
}
/// Scales a matrix, multiplying all elements by a constant value: [this]*=f
void MatrScale(Real factor) {
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) *= factor;
}
/// Scales a matrix, multiplying each element by the corresponding
/// element of matra (it is not the classical matrix multiplication!)
template <class RealB>
void MatrScale(const ChMatrix<RealB>& matra) {
assert(matra.GetColumns() == columns && matra.GetRows() == rows);
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) *= (Real)matra.ElementN(nel);
}
/// Scales a matrix, dividing all elements by a constant value: [this]/=f
void MatrDivScale(Real factor) {
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) /= factor;
}
/// Scales a matrix, dividing each element by the corresponding
/// element of matra (it is an element-wise division!)
template <class RealB>
void MatrDivScale(const ChMatrix<RealB>& matra) {
assert(matra.GetColumns() == columns && matra.GetRows() == rows);
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) /= (Real)matra.ElementN(nel);
}
/// Multiplies two matrices, and stores the result in "this" matrix: [this]=[A]*[B].
template <class RealB, class RealC>
void MatrMultiply(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
assert(matra.GetColumns() == matrb.GetRows());
assert(this->rows == matra.GetRows());
assert(this->columns == matrb.GetColumns());
int col, row, colres;
Real sum;
for (colres = 0; colres < matrb.GetColumns(); ++colres) {
for (row = 0; row < matra.GetRows(); ++row) {
sum = 0;
for (col = 0; col < matra.GetColumns(); ++col)
sum += (Real)(matra.Element(row, col) * matrb.Element(col, colres));
SetElement(row, colres, sum);
}
}
}
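// Usage sketch: sizes must already agree, since MatrMultiply does not resize.
//     ChMatrixDynamic<> A(2, 3), B(3, 2), C(2, 2);
//     C.MatrMultiply(A, B);  // C = A*B, with C preallocated as 2x2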
#ifdef CHRONO_HAS_AVX
/// Multiplies two matrices, and stores the result in "this" matrix: [this]=[A]*[B].
/// AVX implementation: The speed up is marginal if size of the matrices are small, e.g. 3*3
/// Generally, as the matra.GetColumns() increases the method performs better
void MatrMultiplyAVX(const ChMatrix<double>& matra, const ChMatrix<double>& matrb) {
assert(matra.GetColumns() == matrb.GetRows());
assert(this->rows == matra.GetRows());
assert(this->columns == matrb.GetColumns());
int A_Nrow = matra.GetRows();
int B_Nrow = matrb.GetRows();
int A_NCol = matra.GetColumns();
int B_NCol = matrb.GetColumns();
const double* A_add = matra.GetAddress();
const double* B_add = matrb.GetAddress();
double* this_Add = this->GetAddress();
for (int rowA = 0; rowA < A_Nrow; rowA++) {
for (int colB = 0; colB < B_NCol; colB += 4) {
__m256d sum = _mm256_setzero_pd();
for (int elem = 0; elem < A_NCol; elem++) {
__m256d ymmA = _mm256_broadcast_sd(A_add + A_NCol * rowA + elem);
__m256d ymmB = _mm256_loadu_pd(B_add + elem * B_NCol + colB);
__m256d prod = _mm256_mul_pd(ymmA, ymmB);
sum = _mm256_add_pd(sum, prod);
}
_mm256_storeu_pd(this_Add + rowA * B_NCol + colB, sum);
}
}
}
/// Multiplies two matrices (the second is considered transposed): [this]=[A]*[B]'
/// Note: This method is faster than MatrMultiplyT if matra.GetColumns() % 4 == 0 && matra.GetColumns() > 8
/// It is still fast if matra.GetColumns() is large enough even if matra.GetColumns()%4!=0
void MatrMultiplyTAVX(const ChMatrix<double>& matra, const ChMatrix<double>& matrb) {
assert(matra.GetColumns() == matrb.GetColumns());
assert(this->GetRows() == matra.GetRows());
assert(this->GetColumns() == matrb.GetRows());
int A_Nrow = matra.GetRows();
int B_Nrow = matrb.GetRows();
int A_NCol = matra.GetColumns();
int B_NCol = matrb.GetColumns();
const double* A_add = matra.GetAddress();
const double* B_add = matrb.GetAddress();
bool NeedsPadding = (B_NCol % 4 != 0);
int CorrectFAT = ((B_NCol >> 2) << 2);
for (int rowA = 0; rowA < A_Nrow; rowA++) {
for (int rowB = 0; rowB < B_Nrow; rowB++) {
int colB;
double temp_sum = 0.0;
__m256d sum = _mm256_setzero_pd();
for (colB = 0; colB < CorrectFAT; colB += 4) {
__m256d ymmA = _mm256_loadu_pd(A_add + rowA * A_NCol + colB);
__m256d ymmB = _mm256_loadu_pd(B_add + rowB * B_NCol + colB);
__m256d prod = _mm256_mul_pd(ymmA, ymmB);
sum = _mm256_add_pd(sum, prod);
}
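// horizontal reduction: after hadd, lanes 0 and 2 each hold one of the
// two pairwise sums, so adding them below yields the full 4-lane total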
sum = _mm256_hadd_pd(sum, sum);
temp_sum = ((double*)&sum)[0] + ((double*)&sum)[2];
if (NeedsPadding)
for (colB = CorrectFAT; colB < B_NCol; colB++) {
temp_sum += (matra.Element(rowA, colB) * matrb.Element(rowB, colB));
}
SetElement(rowA, rowB, temp_sum);
}
}
}
#endif
/// Multiplies two matrices (the second is considered transposed): [this]=[A]*[B]'
/// Faster than doing B.MatrTranspose(); result.MatrMultiply(A,B);
/// Note: no check on mistaken size of this!
template <class RealB, class RealC>
void MatrMultiplyT(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
assert(matra.GetColumns() == matrb.GetColumns());
assert(this->rows == matra.GetRows());
assert(this->columns == matrb.GetRows());
int col, row, colres;
Real sum;
for (colres = 0; colres < matrb.GetRows(); ++colres) {
for (row = 0; row < matra.GetRows(); ++row) {
sum = 0;
for (col = 0; col < matra.GetColumns(); ++col)
sum += (Real)(matra.Element(row, col) * matrb.Element(colres, col));
SetElement(row, colres, sum);
}
}
}
/// Multiplies two matrices (the first is considered transposed): [this]=[A]'*[B]
/// Faster than doing A.MatrTranspose(); result.MatrMultiply(A,B);
template <class RealB, class RealC>
void MatrTMultiply(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
assert(matra.GetRows() == matrb.GetRows());
assert(this->rows == matra.GetColumns());
assert(this->columns == matrb.GetColumns());
int col, row, colres;
Real sum;
for (colres = 0; colres < matrb.GetColumns(); ++colres) {
for (row = 0; row < matra.GetColumns(); ++row) {
sum = 0;
for (col = 0; col < (matra.GetRows()); ++col)
sum += (Real)(matra.Element(col, row) * matrb.Element(col, colres));
SetElement(row, colres, sum);
}
}
}
/// Computes dot product between two column-matrices (vectors) with
/// same size. Returns a scalar value.
template <class RealB, class RealC>
static Real MatrDot(const ChMatrix<RealB>& ma, const ChMatrix<RealC>& mb) {
assert(ma.GetColumns() == mb.GetColumns() && ma.GetRows() == mb.GetRows());
Real tot = 0;
for (int i = 0; i < ma.GetRows(); ++i)
tot += (Real)(ma.ElementN(i) * mb.ElementN(i));
return tot;
}
/// Transpose this matrix in place
void MatrTranspose() {
if (columns == rows) // square transpose is optimized, in place
{
for (int row = 0; row < rows; ++row)
for (int col = row; col < columns; ++col)
if (row != col) {
Real temp = Element(row, col);
Element(row, col) = Element(col, row);
Element(col, row) = temp;
}
int tmpr = rows;
rows = columns;
columns = tmpr;
} else // Naive implementation for rectangular case. Not in-place. Slower.
{
ChMatrixDynamic<Real> matrcopy(*this);
int tmpr = rows;
rows = columns;
columns = tmpr; // don't realloc buffer, anyway
for (int row = 0; row < rows; ++row)
for (int col = 0; col < columns; ++col)
Element(row, col) = matrcopy.Element(col, row);
}
}
/// Returns the determinant of the matrix.
/// Note! This method must be used only with max 4x4 matrices,
/// otherwise it throws an exception.
Real Det() {
assert(this->GetRows() == this->GetColumns());
assert(this->GetRows() <= 4);
if (this->GetRows() != this->GetColumns())
throw("Cannot compute matrix determinant because rectangular matrix");
if (this->GetRows() > 4)
throw("Cannot compute matrix determinant because matr. larger than 4x4");
Real det = 0;
switch (this->GetRows()) {
case 1:
det = (*this)(0, 0);
break;
case 2:
det = (*this)(0, 0) * (*this)(1, 1) - (*this)(0, 1) * (*this)(1, 0);
break;
case 3:
det = (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2) + (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) +
(*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) - (*this)(2, 0) * (*this)(1, 1) * (*this)(0, 2) -
(*this)(2, 1) * (*this)(1, 2) * (*this)(0, 0) - (*this)(2, 2) * (*this)(1, 0) * (*this)(0, 1);
break;
case 4:
det = (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 3) +
(*this)(0, 0) * (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 1) +
(*this)(0, 0) * (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 2) +
(*this)(0, 1) * (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 2) +
(*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 3) +
(*this)(0, 1) * (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 0) +
(*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 3) +
(*this)(0, 2) * (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 0) +
(*this)(0, 2) * (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 1) +
(*this)(0, 3) * (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 1) +
(*this)(0, 3) * (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 2) +
(*this)(0, 3) * (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 0) -
(*this)(0, 0) * (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 2) -
(*this)(0, 0) * (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 3) -
(*this)(0, 0) * (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 1) -
(*this)(0, 1) * (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 3) -
(*this)(0, 1) * (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 0) -
(*this)(0, 1) * (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 2) -
(*this)(0, 2) * (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 1) -
(*this)(0, 2) * (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 3) -
(*this)(0, 2) * (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 0) -
(*this)(0, 3) * (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 2) -
(*this)(0, 3) * (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 0) -
(*this)(0, 3) * (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 1);
break;
}
return det;
}
/// Returns the inverse of the matrix.
/// Note! This method must be used only with max 4x4 matrices,
/// otherwise it throws an exception.
void MatrInverse() {
assert(this->GetRows() == this->GetColumns());
assert(this->GetRows() <= 4);
assert(this->Det() != 0);
if (this->GetRows() != this->GetColumns())
throw("Cannot compute matrix inverse because rectangular matrix");
if (this->GetRows() > 4)
throw("Cannot compute matrix inverse because matr. larger than 4x4");
if (this->Det() == 0)
throw("Cannot compute matrix inverse because singular matrix");
switch (this->GetRows()) {
case 1:
(*this)(0, 0) = (1 / (*this)(0, 0));
break;
case 2: {
ChMatrixDynamic<Real> inv(2, 2);
inv(0, 0) = (*this)(1, 1);
inv(0, 1) = -(*this)(0, 1);
inv(1, 1) = (*this)(0, 0);
inv(1, 0) = -(*this)(1, 0);
inv.MatrDivScale(this->Det());
this->CopyFromMatrix(inv);
break;
}
case 3: {
ChMatrixDynamic<Real> inv(3, 3);
inv(0, 0) = (*this)(1, 1) * (*this)(2, 2) - (*this)(1, 2) * (*this)(2, 1);
inv(0, 1) = (*this)(2, 1) * (*this)(0, 2) - (*this)(0, 1) * (*this)(2, 2);
inv(0, 2) = (*this)(0, 1) * (*this)(1, 2) - (*this)(0, 2) * (*this)(1, 1);
inv(1, 0) = (*this)(1, 2) * (*this)(2, 0) - (*this)(1, 0) * (*this)(2, 2);
inv(1, 1) = (*this)(2, 2) * (*this)(0, 0) - (*this)(2, 0) * (*this)(0, 2);
inv(1, 2) = (*this)(0, 2) * (*this)(1, 0) - (*this)(1, 2) * (*this)(0, 0);
inv(2, 0) = (*this)(1, 0) * (*this)(2, 1) - (*this)(1, 1) * (*this)(2, 0);
inv(2, 1) = (*this)(0, 1) * (*this)(2, 0) - (*this)(0, 0) * (*this)(2, 1);
inv(2, 2) = (*this)(0, 0) * (*this)(1, 1) - (*this)(0, 1) * (*this)(1, 0);
inv.MatrDivScale(this->Det());
this->CopyFromMatrix(inv);
break;
}
case 4: {
ChMatrixDynamic<Real> inv(4, 4);
inv.SetElement(
0, 0,
(*this)(1, 2) * (*this)(2, 3) * (*this)(3, 1) - (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 1) +
(*this)(1, 3) * (*this)(2, 1) * (*this)(3, 2) - (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 2) -
(*this)(1, 2) * (*this)(2, 1) * (*this)(3, 3) + (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 3));
inv.SetElement(
0, 1,
(*this)(0, 3) * (*this)(2, 2) * (*this)(3, 1) - (*this)(0, 2) * (*this)(2, 3) * (*this)(3, 1) -
(*this)(0, 3) * (*this)(2, 1) * (*this)(3, 2) + (*this)(0, 1) * (*this)(2, 3) * (*this)(3, 2) +
(*this)(0, 2) * (*this)(2, 1) * (*this)(3, 3) - (*this)(0, 1) * (*this)(2, 2) * (*this)(3, 3));
inv.SetElement(
0, 2,
(*this)(0, 2) * (*this)(1, 3) * (*this)(3, 1) - (*this)(0, 3) * (*this)(1, 2) * (*this)(3, 1) +
(*this)(0, 3) * (*this)(1, 1) * (*this)(3, 2) - (*this)(0, 1) * (*this)(1, 3) * (*this)(3, 2) -
(*this)(0, 2) * (*this)(1, 1) * (*this)(3, 3) + (*this)(0, 1) * (*this)(1, 2) * (*this)(3, 3));
inv.SetElement(
0, 3,
(*this)(0, 3) * (*this)(1, 2) * (*this)(2, 1) - (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 1) -
(*this)(0, 3) * (*this)(1, 1) * (*this)(2, 2) + (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 2) +
(*this)(0, 2) * (*this)(1, 1) * (*this)(2, 3) - (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 3));
inv.SetElement(
1, 0,
(*this)(1, 3) * (*this)(2, 2) * (*this)(3, 0) - (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 0) -
(*this)(1, 3) * (*this)(2, 0) * (*this)(3, 2) + (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 2) +
(*this)(1, 2) * (*this)(2, 0) * (*this)(3, 3) - (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 3));
inv.SetElement(
1, 1,
(*this)(0, 2) * (*this)(2, 3) * (*this)(3, 0) - (*this)(0, 3) * (*this)(2, 2) * (*this)(3, 0) +
(*this)(0, 3) * (*this)(2, 0) * (*this)(3, 2) - (*this)(0, 0) * (*this)(2, 3) * (*this)(3, 2) -
(*this)(0, 2) * (*this)(2, 0) * (*this)(3, 3) + (*this)(0, 0) * (*this)(2, 2) * (*this)(3, 3));
inv.SetElement(
1, 2,
(*this)(0, 3) * (*this)(1, 2) * (*this)(3, 0) - (*this)(0, 2) * (*this)(1, 3) * (*this)(3, 0) -
(*this)(0, 3) * (*this)(1, 0) * (*this)(3, 2) + (*this)(0, 0) * (*this)(1, 3) * (*this)(3, 2) +
(*this)(0, 2) * (*this)(1, 0) * (*this)(3, 3) - (*this)(0, 0) * (*this)(1, 2) * (*this)(3, 3));
inv.SetElement(
1, 3,
(*this)(0, 2) * (*this)(1, 3) * (*this)(2, 0) - (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 0) +
(*this)(0, 3) * (*this)(1, 0) * (*this)(2, 2) - (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 2) -
(*this)(0, 2) * (*this)(1, 0) * (*this)(2, 3) + (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 3));
inv.SetElement(
2, 0,
(*this)(1, 1) * (*this)(2, 3) * (*this)(3, 0) - (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 0) +
(*this)(1, 3) * (*this)(2, 0) * (*this)(3, 1) - (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 1) -
(*this)(1, 1) * (*this)(2, 0) * (*this)(3, 3) + (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 3));
inv.SetElement(
2, 1,
(*this)(0, 3) * (*this)(2, 1) * (*this)(3, 0) - (*this)(0, 1) * (*this)(2, 3) * (*this)(3, 0) -
(*this)(0, 3) * (*this)(2, 0) * (*this)(3, 1) + (*this)(0, 0) * (*this)(2, 3) * (*this)(3, 1) +
(*this)(0, 1) * (*this)(2, 0) * (*this)(3, 3) - (*this)(0, 0) * (*this)(2, 1) * (*this)(3, 3));
inv.SetElement(
2, 2,
(*this)(0, 1) * (*this)(1, 3) * (*this)(3, 0) - (*this)(0, 3) * (*this)(1, 1) * (*this)(3, 0) +
(*this)(0, 3) * (*this)(1, 0) * (*this)(3, 1) - (*this)(0, 0) * (*this)(1, 3) * (*this)(3, 1) -
(*this)(0, 1) * (*this)(1, 0) * (*this)(3, 3) + (*this)(0, 0) * (*this)(1, 1) * (*this)(3, 3));
inv.SetElement(
2, 3,
(*this)(0, 3) * (*this)(1, 1) * (*this)(2, 0) - (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 0) -
(*this)(0, 3) * (*this)(1, 0) * (*this)(2, 1) + (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 1) +
(*this)(0, 1) * (*this)(1, 0) * (*this)(2, 3) - (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 3));
inv.SetElement(
3, 0,
(*this)(1, 2) * (*this)(2, 1) * (*this)(3, 0) - (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 0) -
(*this)(1, 2) * (*this)(2, 0) * (*this)(3, 1) + (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 1) +
(*this)(1, 1) * (*this)(2, 0) * (*this)(3, 2) - (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 2));
inv.SetElement(
3, 1,
(*this)(0, 1) * (*this)(2, 2) * (*this)(3, 0) - (*this)(0, 2) * (*this)(2, 1) * (*this)(3, 0) +
(*this)(0, 2) * (*this)(2, 0) * (*this)(3, 1) - (*this)(0, 0) * (*this)(2, 2) * (*this)(3, 1) -
(*this)(0, 1) * (*this)(2, 0) * (*this)(3, 2) + (*this)(0, 0) * (*this)(2, 1) * (*this)(3, 2));
inv.SetElement(
3, 2,
(*this)(0, 2) * (*this)(1, 1) * (*this)(3, 0) - (*this)(0, 1) * (*this)(1, 2) * (*this)(3, 0) -
(*this)(0, 2) * (*this)(1, 0) * (*this)(3, 1) + (*this)(0, 0) * (*this)(1, 2) * (*this)(3, 1) +
(*this)(0, 1) * (*this)(1, 0) * (*this)(3, 2) - (*this)(0, 0) * (*this)(1, 1) * (*this)(3, 2));
inv.SetElement(
3, 3,
(*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) - (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 0) +
(*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) - (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 1) -
(*this)(0, 1) * (*this)(1, 0) * (*this)(2, 2) + (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2));
inv.MatrDivScale(this->Det());
this->CopyFromMatrix(inv);
break;
}
}
}
/// Returns true if this matrix is identical to the other matrix
bool Equals(const ChMatrix<Real>& other) { return Equals(other, 0.0); }
/// Returns true if this matrix equals another matrix, within a tolerance 'tol'
bool Equals(const ChMatrix<Real>& other, Real tol) {
if ((other.GetColumns() != this->columns) || (other.GetRows() != this->rows))
return false;
for (int nel = 0; nel < rows * columns; ++nel)
if (fabs(ElementN(nel) - other.ElementN(nel)) > tol)
return false;
return true;
}
/// Multiplies this 3x4 matrix by a quaternion, as v=[G]*q
/// The matrix must be 3x4.
/// \return The result of the multiplication, i.e. a vector.
template <class RealB>
ChVector<Real> Matr34_x_Quat(const ChQuaternion<RealB>& qua) {
assert((rows == 3) && (columns == 4));
return ChVector<Real>(Get34Element(0, 0) * (Real)qua.e0() + Get34Element(0, 1) * (Real)qua.e1() +
Get34Element(0, 2) * (Real)qua.e2() + Get34Element(0, 3) * (Real)qua.e3(),
Get34Element(1, 0) * (Real)qua.e0() + Get34Element(1, 1) * (Real)qua.e1() +
Get34Element(1, 2) * (Real)qua.e2() + Get34Element(1, 3) * (Real)qua.e3(),
Get34Element(2, 0) * (Real)qua.e0() + Get34Element(2, 1) * (Real)qua.e1() +
Get34Element(2, 2) * (Real)qua.e2() + Get34Element(2, 3) * (Real)qua.e3());
}
/// Multiplies this 3x4 matrix (transposed) by a vector, as q=[G]'*v
/// The matrix must be 3x4.
/// \return The result of the multiplication, i.e. a quaternion.
template <class RealB>
ChQuaternion<Real> Matr34T_x_Vect(const ChVector<RealB>& va) {
assert((rows == 3) && (columns == 4));
return ChQuaternion<Real>(
Get34Element(0, 0) * (Real)va.x() + Get34Element(1, 0) * (Real)va.y() + Get34Element(2, 0) * (Real)va.z(),
Get34Element(0, 1) * (Real)va.x() + Get34Element(1, 1) * (Real)va.y() + Get34Element(2, 1) * (Real)va.z(),
Get34Element(0, 2) * (Real)va.x() + Get34Element(1, 2) * (Real)va.y() + Get34Element(2, 2) * (Real)va.z(),
Get34Element(0, 3) * (Real)va.x() + Get34Element(1, 3) * (Real)va.y() + Get34Element(2, 3) * (Real)va.z());
}
/// Multiplies this 4x4 matrix by a quaternion, as q_out=[A]*q.
/// The matrix must be 4x4.
/// \return The result of the multiplication, i.e. a quaternion.
template <class RealB>
ChQuaternion<Real> Matr44_x_Quat(const ChQuaternion<RealB>& qua) {
assert((rows == 4) && (columns == 4));
return ChQuaternion<Real>(Get44Element(0, 0) * (Real)qua.e0() + Get44Element(0, 1) * (Real)qua.e1() +
Get44Element(0, 2) * (Real)qua.e2() + Get44Element(0, 3) * (Real)qua.e3(),
Get44Element(1, 0) * (Real)qua.e0() + Get44Element(1, 1) * (Real)qua.e1() +
Get44Element(1, 2) * (Real)qua.e2() + Get44Element(1, 3) * (Real)qua.e3(),
Get44Element(2, 0) * (Real)qua.e0() + Get44Element(2, 1) * (Real)qua.e1() +
Get44Element(2, 2) * (Real)qua.e2() + Get44Element(2, 3) * (Real)qua.e3(),
Get44Element(3, 0) * (Real)qua.e0() + Get44Element(3, 1) * (Real)qua.e1() +
Get44Element(3, 2) * (Real)qua.e2() + Get44Element(3, 3) * (Real)qua.e3());
}
/// Transposes only the lower-right 3x3 submatrix of a hemisymmetric 4x4 matrix,
/// used when the 4x4 matrix is a "star" matrix [q] coming from a quaternion q:
/// the non-commutative quaternion product is:
/// q1 x q2 = [q1]*q2 = [q2st]*q1
/// where [q2st] is the "semitranspose" of [q2].
void MatrXq_SemiTranspose() {
SetElement(1, 2, -GetElement(1, 2));
SetElement(1, 3, -GetElement(1, 3));
SetElement(2, 1, -GetElement(2, 1));
SetElement(2, 3, -GetElement(2, 3));
SetElement(3, 1, -GetElement(3, 1));
SetElement(3, 2, -GetElement(3, 2));
}
/// Change the sign of the 2nd, 3rd and 4th columns of this 4x4 matrix.
/// The product between a quaternion q1 and the conjugate of q2 (q2') is:
/// q1 x q2' = [q1]*q2' = [q1sn]*q2
/// where [q1sn] is the seminegation of the 4x4 matrix [q1].
void MatrXq_SemiNeg() {
for (int i = 0; i < rows; ++i)
for (int j = 1; j < columns; ++j)
SetElement(i, j, -GetElement(i, j));
}
/// Gets the infinity norm of the matrix, i.e. the max
/// of its elements in absolute value.
Real NormInf() {
Real norm = 0;
for (int nel = 0; nel < rows * columns; ++nel)
if ((fabs(ElementN(nel))) > norm)
norm = fabs(ElementN(nel));
return norm;
}
/// Gets the two-norm (Frobenius norm) of the matrix, i.e. the square root
/// of the sum of the squared elements.
Real NormTwo() {
Real norm = 0;
for (int nel = 0; nel < rows * columns; ++nel)
norm += ElementN(nel) * ElementN(nel);
return (sqrt(norm));
}
/// Finds max value among the values of the matrix
Real Max() {
Real mmax = GetElement(0, 0);
for (int nel = 0; nel < rows * columns; ++nel)
if (ElementN(nel) > mmax)
mmax = ElementN(nel);
return mmax;
}
/// Finds min value among the values of the matrix
Real Min() {
Real mmin = GetElement(0, 0);
for (int nel = 0; nel < rows * columns; ++nel)
if (ElementN(nel) < mmin)
mmin = ElementN(nel);
return mmin;
}
/// Linear interpolation of two matrices. Parameter mx must be 0...1.
/// [this] = (1-x)[A] + (x)[B]. Matrices must have the same size!
void LinInterpolate(const ChMatrix<Real>& matra, const ChMatrix<Real>& matrb, Real mx) {
assert(matra.columns == matrb.columns && matra.rows == matrb.rows);
for (int nel = 0; nel < rows * columns; nel++)
ElementN(nel) = matra.ElementN(nel) * (1 - mx) + matrb.ElementN(nel) * (mx);
}
/// Fills a matrix or a vector with a bilinear interpolation,
/// from corner values (as a u-v patch).
void RowColInterp(Real vmin, Real vmax, Real umin, Real umax) {
for (int iu = 0; iu < GetColumns(); iu++)
for (int iv = 0; iv < GetRows(); iv++) {
if (GetRows() > 1)
Element(iv, iu) = vmin + (vmax - vmin) * ((Real)iv / ((Real)(GetRows() - 1)));
if (GetColumns() > 1)
Element(iv, iu) += umin + (umax - umin) * ((Real)iu / ((Real)(GetColumns() - 1)));
}
}
//
// BOOKKEEPING
//
/// Paste a matrix "matra" into "this", inserting at location insrow-inscol.
/// Normal copy for insrow=inscol=0
template <class RealB>
void PasteMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
for (int i = 0; i < matra.GetRows(); ++i)
for (int j = 0; j < matra.GetColumns(); ++j)
Element(i + insrow, j + inscol) = (Real)matra.Element(i, j);
}
/// Paste a matrix "matra" into "this", inserting at location insrow-inscol
/// and performing a sum with the preexisting values.
template <class RealB>
void PasteSumMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
for (int i = 0; i < matra.GetRows(); ++i)
for (int j = 0; j < matra.GetColumns(); ++j)
Element(i + insrow, j + inscol) += (Real)matra.Element(i, j);
}
/// Paste a matrix "matra", transposed, into "this", inserting at location insrow-inscol.
/// Normal copy for insrow=inscol=0
template <class RealB>
void PasteTranspMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
for (int i = 0; i < matra.GetRows(); ++i)
for (int j = 0; j < matra.GetColumns(); ++j)
Element(j + insrow, i + inscol) = (Real)matra.Element(i, j);
}
/// Paste a matrix "matra", transposed, into "this", inserting at location insrow-inscol
/// and performing a sum with the preexisting values.
template <class RealB>
void PasteSumTranspMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
for (int i = 0; i < matra.GetRows(); ++i)
for (int j = 0; j < matra.GetColumns(); ++j)
Element(j + insrow, i + inscol) += (Real)matra.Element(i, j);
}
/// Paste a clipped portion of the matrix "matra" into "this",
/// inserting the clip (of size nrows, ncolumns) at the location insrow-inscol.
template <class RealB>
void PasteClippedMatrix(const ChMatrix<RealB>& matra,
int cliprow,
int clipcol,
int nrows,
int ncolumns,
int insrow,
int inscol) {
for (int i = 0; i < nrows; ++i)
for (int j = 0; j < ncolumns; ++j)
Element(i + insrow, j + inscol) = (Real)matra.Element(i + cliprow, j + clipcol);
}
/// Paste a clipped portion of the matrix "matra" into "this", where "this"
/// is a vector (of ChMatrix type),
/// inserting the clip (of size nrows, ncolumns) at the location insindex.
template <class RealB>
void PasteClippedMatrixToVector(const ChMatrix<RealB>& matra,
int cliprow,
int clipcol,
int nrows,
int ncolumns,
int insindex) {
for (int i = 0; i < nrows; ++i)
for (int j = 0; j < ncolumns; ++j)
ElementN(insindex + i * ncolumns + j) = (Real)matra.Element(cliprow + i, clipcol + j);
}
/// Paste a clipped portion of a vector into "this", where "this"
/// is a matrix (of ChMatrix type),
/// inserting the clip (of size nrows, ncolumns) at the location insindex.
template <class RealB>
void PasteClippedVectorToMatrix(const ChMatrix<RealB>& matra,
int cliprow,
int clipcol,
int nrows,
int ncolumns,
int insindex) {
for (int i = 0; i < nrows; ++i)
for (int j = 0; j < ncolumns; ++j)
Element(i + cliprow, j + clipcol) = (Real)matra.ElementN(insindex + i * ncolumns + j);
}
/// Paste a clipped portion of the matrix "matra" into "this", performing a sum with preexisting values,
/// inserting the clip (of size nrows, ncolumns) at the location insrow-inscol.
template <class RealB>
void PasteSumClippedMatrix(const ChMatrix<RealB>& matra,
int cliprow,
int clipcol,
int nrows,
int ncolumns,
int insrow,
int inscol) {
for (int i = 0; i < nrows; ++i)
for (int j = 0; j < ncolumns; ++j)
#pragma omp atomic
Element(i + insrow, j + inscol) += (Real)matra.Element(i + cliprow, j + clipcol);
}
/// Paste a vector "va" into the matrix.
template <class RealB>
void PasteVector(const ChVector<RealB>& va, int insrow, int inscol) {
SetElement(insrow + 0, inscol, (Real)va.x());
SetElement(insrow + 1, inscol, (Real)va.y());
SetElement(insrow + 2, inscol, (Real)va.z());
}
/// Paste a vector "va" into the matrix, summing it with preexisting values.
template <class RealB>
void PasteSumVector(const ChVector<RealB>& va, int insrow, int inscol) {
Element(insrow + 0, inscol) += (Real)va.x();
Element(insrow + 1, inscol) += (Real)va.y();
Element(insrow + 2, inscol) += (Real)va.z();
}
/// Paste a vector "va" into the matrix, subtracting it from preexisting values.
template <class RealB>
void PasteSubVector(const ChVector<RealB>& va, int insrow, int inscol) {
Element(insrow + 0, inscol) -= (Real)va.x();
Element(insrow + 1, inscol) -= (Real)va.y();
Element(insrow + 2, inscol) -= (Real)va.z();
}
/// Paste a quaternion into the matrix.
template <class RealB>
void PasteQuaternion(const ChQuaternion<RealB>& qa, int insrow, int inscol) {
SetElement(insrow + 0, inscol, (Real)qa.e0());
SetElement(insrow + 1, inscol, (Real)qa.e1());
SetElement(insrow + 2, inscol, (Real)qa.e2());
SetElement(insrow + 3, inscol, (Real)qa.e3());
}
/// Paste a quaternion into the matrix, summing it with preexisting values.
template <class RealB>
void PasteSumQuaternion(const ChQuaternion<RealB>& qa, int insrow, int inscol) {
Element(insrow + 0, inscol) += (Real)qa.e0();
Element(insrow + 1, inscol) += (Real)qa.e1();
Element(insrow + 2, inscol) += (Real)qa.e2();
Element(insrow + 3, inscol) += (Real)qa.e3();
}
/// Paste a coordsys into the matrix.
template <class RealB>
void PasteCoordsys(const ChCoordsys<RealB>& cs, int insrow, int inscol) {
PasteVector(cs.pos, insrow, inscol);
PasteQuaternion(cs.rot, insrow + 3, inscol);
}
/// Returns the vector clipped from insrow, inscol.
ChVector<Real> ClipVector(int insrow, int inscol) const {
return ChVector<Real>(Element(insrow, inscol), Element(insrow + 1, inscol), Element(insrow + 2, inscol));
}
/// Returns the quaternion clipped from insrow, inscol.
ChQuaternion<Real> ClipQuaternion(int insrow, int inscol) const {
return ChQuaternion<Real>(Element(insrow, inscol), Element(insrow + 1, inscol), Element(insrow + 2, inscol),
Element(insrow + 3, inscol));
}
/// Returns the coordsys clipped from insrow, inscol.
ChCoordsys<Real> ClipCoordsys(int insrow, int inscol) const {
return ChCoordsys<Real>(ClipVector(insrow, inscol), ClipQuaternion(insrow + 3, inscol));
}
//
    // MULTIBODY SPECIFIC MATH FUNCTIONS
//
    /// Fills a 4x4 matrix as the "star" matrix, representing the quaternion cross product.
    /// That is, given two quaternions a and b, a x b = [Astar] * b.
template <class RealB>
void Set_Xq_matrix(const ChQuaternion<RealB>& q) {
Set44Element(0, 0, (Real)q.e0());
Set44Element(0, 1, -(Real)q.e1());
Set44Element(0, 2, -(Real)q.e2());
Set44Element(0, 3, -(Real)q.e3());
Set44Element(1, 0, (Real)q.e1());
Set44Element(1, 1, (Real)q.e0());
Set44Element(1, 2, -(Real)q.e3());
Set44Element(1, 3, (Real)q.e2());
Set44Element(2, 0, (Real)q.e2());
Set44Element(2, 1, (Real)q.e3());
Set44Element(2, 2, (Real)q.e0());
Set44Element(2, 3, -(Real)q.e1());
Set44Element(3, 0, (Real)q.e3());
Set44Element(3, 1, -(Real)q.e2());
Set44Element(3, 2, (Real)q.e1());
Set44Element(3, 3, (Real)q.e0());
}
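    // Usage sketch (hypothetical, assuming chrono's Qcross() helper for the
    // quaternion cross product): after
    //   ChMatrixDynamic<> Xq(4, 4);
    //   Xq.Set_Xq_matrix(a);
    // the column Xq * [b.e0, b.e1, b.e2, b.e3]^T holds the same four
    // components as Qcross(a, b), for any quaternions a and b.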
};
} // end namespace chrono
#endif
|
db_graph.h | #ifndef _DB_GRAPH_H_
#define _DB_GRAPH_H_
#include "../io/io.h"
namespace db {
template <typename T>
class GraphArc {
private:
const T* _onet = nullptr;
const T* _inet = nullptr;
unsigned _onodeIdx = 0;
unsigned _inodeIdx = 0;
double _cost = 0;
public:
GraphArc(const T* onet, const T* inet, const unsigned onodeIdx, const unsigned inodeIdx, const double cost)
: _onet(onet), _inet(inet), _onodeIdx(onodeIdx), _inodeIdx(inodeIdx), _cost(cost) {}
const T* inet() const { return _inet; }
double cost() const { return _cost; }
unsigned write(
ostream& os, const string& design, const Rectangle& die, const Point& pitch, const unsigned dir) const;
    bool operator<(const GraphArc<T>& rhs) const { return _cost < rhs._cost; }
};
template <typename T>
unsigned GraphArc<T>::write(
ostream& os, const string& design, const Rectangle& die, const Point& pitch, const unsigned dir) const {
const NetRouteUpNode& onode = _onet->upVias[_onodeIdx];
const NetRouteUpNode& inode = _inet->upVias[_inodeIdx];
const int dx = onode[dir] - inode[dir];
const int dy = onode[1 - dir] - inode[1 - dir];
const double px = dx / static_cast<double>(pitch[dir]);
const double py = dy / static_cast<double>(pitch[1 - dir]);
const double rx = dx / static_cast<double>(die[dir].range());
const double ry = dy / static_cast<double>(die[1 - dir].range());
const unsigned numOPins = _onet->numOPins();
os << _onet->name() << ',' << _inet->name() << ',' << _onodeIdx << ',' << _inodeIdx << ',' << px << ',' << py << ','
<< abs(px) << ',' << abs(py) << ',' << rx << ',' << ry << ',' << abs(rx) << ',' << abs(ry) << ',' << numOPins
<< ',' << _inet->numOPins() << ',';
if (_onet->upPin()) {
os << "1,";
} else {
os << "0,";
}
if (_inet->upPin()) {
os << "1,";
} else {
os << "0,";
}
os << _onet->pitchLen() << ',' << _onet->numVias() << ',' << _onet->pitchLen(DBModule::Metal - 1) << ',';
for (int i = static_cast<int>(DBModule::Metal) - 2; i != static_cast<int>(DBModule::Metal) - 5; --i) {
if (i >= 0) {
os << _onet->numVias(i) << ',' << _onet->pitchLen(i) << ',';
} else {
os << 0 << ',' << 0 << ',';
}
}
os << _inet->pitchLen() << ',' << _inet->numVias() << ',' << _inet->pitchLen(DBModule::Metal - 1) << ',';
for (int i = static_cast<int>(DBModule::Metal) - 2; i != static_cast<int>(DBModule::Metal) - 5; --i) {
if (i >= 0) {
os << _inet->numVias(i) << ',' << _inet->pitchLen(i) << ',';
} else {
os << 0 << ',' << 0 << ',';
}
}
if (_onet->parent() == _inet->parent()) {
os << "1\n";
return numOPins;
} else {
os << "0\n";
return 0;
}
}
template <typename T>
class Graph {
private:
static constexpr double INFTY =
numeric_limits<double>::has_infinity ? numeric_limits<double>::infinity() : numeric_limits<double>::max();
unsigned _nONodes = 0;
unsigned _nINodes = 0;
unsigned _nOPins = 0;
using CostType = long long;
using UCostType = unsigned long long;
vector<T*> _onodes;
public:
static vector<T*> inodes;
static mutex selMtx;
static unsigned totalNumONetsMissed;
static unsigned nFlows;
static unsigned nCoveredNets;
static unsigned nCoveredPins;
static ofstream selOfs;
private:
void addNode(T* splitNet);
static void writeSel(
const T* onet, const string& design, const Rectangle& die, const Point& pitch, const unsigned dir);
static void writeImg(
const T* splitNet, const vector<T*>& splitNets, const Image& image, const string& path, const unsigned dir);
public:
bool run(const vector<T*>& splitNets,
const Image& image,
const Rectangle& die,
const Point& pitch,
const unsigned dir,
const string& file);
};
template <typename T>
vector<T*> Graph<T>::inodes;
template <typename T>
mutex Graph<T>::selMtx;
template <typename T>
unsigned Graph<T>::totalNumONetsMissed;
template <typename T>
unsigned Graph<T>::nFlows;
template <typename T>
unsigned Graph<T>::nCoveredNets;
template <typename T>
unsigned Graph<T>::nCoveredPins;
template <typename T>
ofstream Graph<T>::selOfs;
template <typename T>
void Graph<T>::addNode(T* splitNet) {
if (splitNet->isSource()) {
inodes.push_back(splitNet);
++_nINodes;
} else if (splitNet->isSink()) {
_onodes.push_back(splitNet);
++_nONodes;
_nOPins += splitNet->numOPins();
}
}
template <typename T>
void Graph<T>::writeSel(
const T* onet, const string& design, const Rectangle& die, const Point& pitch, const unsigned dir) {
bool isMissed = true;
bool isCovered = true;
vector<GraphArc<T>> arcs;
for (T* inet : inodes) {
double icap = 0;
const bool isLoop = T::isLoop(inet, onet);
if (!isLoop) {
unsigned onodeIdx = 0;
unsigned inodeIdx = 0;
UCostType cost = ULLONG_MAX;
bool select = false;
const int dieW = die[dir].range();
for (unsigned i = 0; i != onet->upVias.size(); ++i) {
const NetRouteUpNode& on = onet->upVias[i];
const int ox = on[dir] - die[dir].lo();
const bool ob = ox < dieW * 0.08 || ox > dieW * 0.92;
for (unsigned j = 0; j != inet->upVias.size(); ++j) {
const NetRouteUpNode& in = inet->upVias[j];
const int ix = in[dir] - die[dir].lo();
const bool ib = ix < dieW * 0.08 || ix > dieW * 0.92;
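                    // The two distance components are packed into one 64-bit key:
                    // |ix - ox| fills the high 32 bits, so candidate arcs compare
                    // lexicographically, first along dir, then orthogonally.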
const UCostType c = (static_cast<UCostType>(abs(ix - ox)) << 32) + abs(in[1 - dir] - on[1 - dir]);
const bool d = ob || ib || Net::isDirection(in, on);
if (d && c < cost) {
cost = c;
select = true;
onodeIdx = i;
inodeIdx = j;
}
}
}
if (select) {
if (arcs.size() < DBModule::NumCands) {
arcs.emplace_back(onet, inet, onodeIdx, inodeIdx, cost);
if (arcs.size() == DBModule::NumCands) make_heap(arcs.begin(), arcs.end());
} else if (cost < arcs.front().cost()) {
pop_heap(arcs.begin(), arcs.end());
if (onet->parent() == arcs.back().inet()->parent()) {
isCovered = false;
// return;
}
arcs.pop_back();
arcs.emplace_back(onet, inet, onodeIdx, inodeIdx, cost);
push_heap(arcs.begin(), arcs.end());
} else if (onet->parent() == inet->parent()) {
isCovered = false;
// return;
}
icap = 1;
}
}
if (onet->parent() == inet->parent()) {
if (icap) {
isMissed = false;
} else {
if (isLoop) printlog(LOG_WARN, "onet %s inet %s is loop", onet->name().c_str(), inet->name().c_str());
isCovered = false;
// lock_guard<mutex> lock(selMtx);
// ++totalNumONetsMissed;
// return;
}
}
}
lock_guard<mutex> lock(selMtx);
if (isMissed) {
++totalNumONetsMissed;
// return;
}
if (isCovered) {
++nCoveredNets;
nCoveredPins += onet->numOPins();
}
nFlows += arcs.size();
for (const GraphArc<T>& arc : arcs) arc.write(selOfs, design, die, pitch, dir);
}
template <typename T>
void Graph<T>::writeImg(
const T* splitNet, const vector<T*>& splitNets, const Image& image, const string& path, const unsigned dir) {
for (unsigned i = 0; i != splitNet->upVias.size(); ++i) {
const NetRouteUpNode& node = splitNet->upVias[i];
int x = node.x();
int y = node.y();
for (unsigned j = 1; j <= 4; j *= 2) {
const double xOrigin = x - image.xStep() * j * 49.5;
const double yOrigin = y - image.yStep() * j * 49.5;
const double xStep = image.xStep() * j;
const double yStep = image.yStep() * j;
const int xCeil =
min<int>(image.xNum(), ceil((xOrigin + xStep * 99 - image.xOrigin()) / image.xStep()) + 0.5);
const int yCeil =
min<int>(image.yNum(), ceil((yOrigin + yStep * 99 - image.yOrigin()) / image.yStep()) + 0.5);
Image img(xOrigin, yOrigin, xStep, yStep, 99, 99);
img.setRouting(splitNet, 4);
for (unsigned yIdx = max<int>(0, (yOrigin - image.yOrigin()) / image.yStep());
static_cast<int>(yIdx) < yCeil;
++yIdx) {
for (unsigned xIdx = max<int>(0, (xOrigin - image.xOrigin()) / image.xStep());
static_cast<int>(xIdx) < xCeil;
++xIdx) {
img.setData(image.yOrigin() + image.yStep() * (yIdx + 0.5),
image.xOrigin() + image.xStep() * (xIdx + 0.5),
image.data(yIdx, xIdx));
}
}
img.encode(path + "/" + splitNet->name() + "_" + to_string(i) + "_" + to_string(j) + ".png", dir);
}
}
}
template <typename T>
bool Graph<T>::run(const vector<T*>& splitNets,
const Image& image,
const Rectangle& die,
const Point& pitch,
const unsigned dir,
const string& file) {
for (T* splitNet : splitNets) {
if (splitNet->upVias.empty()) continue;
addNode(splitNet);
}
size_t pos0 = file.find_last_of('/');
string path = ".";
string csv = file;
if (pos0 != string::npos) {
path = file.substr(0, pos0);
csv = file.substr(pos0 + 1);
}
size_t pos1 = csv.find('.');
const string& base = csv.substr(0, pos1);
path = path + "/" + base;
size_t pos2 = base.find_last_of('_');
const string& design = base.substr(0, pos2);
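    // Example (hypothetical input): file = "out/cpu_1.csv" yields
    // path = "out/cpu_1", base = "cpu_1", and design = "cpu".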
ofstream wscOfs = io::IOModule::write(path + "/" + base + ".wsc.csv", true);
ofstream drvOfs = io::IOModule::write(path + "/" + base + ".drv.csv", true);
ofstream snkOfs = io::IOModule::write(path + "/" + base + ".snk.csv", true);
wscOfs << "Vpin ID,X coordinate,Y coordinate,WL down to L1,Pin Type,NumberLayer One Pins,Ave Cell Area Input,Ave "
"Cell Area Output,Ave Pin X coordinate,Ave Pin Y "
"coordinate,CongestionCrouting,CongestionPlacement,Matching Vpin ID\n";
drvOfs << "DESIGN,PARENT,NAME,VIA_IDX,VIA_ABSOLUTE_X,VIA_ABSOLUTE_Y,VIA_RELATIVE_X,VIA_RELATIVE_Y,DIR_NN,DIR_NP,"
"DIR_PN,DIR_PP,SINK_COUNT,UP_PIN,WL,VIAS,WL_M4";
snkOfs << "DESIGN,PARENT,NAME,VIA_IDX,VIA_ABSOLUTE_X,VIA_ABSOLUTE_Y,VIA_RELATIVE_X,VIA_RELATIVE_Y,DIR_NN,DIR_NP,"
"DIR_PN,DIR_PP,SINK_COUNT,UP_PIN,WL,VIAS,WL_M4";
for (unsigned i = 3; i; --i) {
drvOfs << ",VIAS_V" << i << i + 1 << ",WL_M" << i;
snkOfs << ",VIAS_V" << i << i + 1 << ",WL_M" << i;
}
drvOfs << endl;
snkOfs << endl;
for (unsigned inetIdx = 0; inetIdx != inodes.size(); ++inetIdx) {
const T* inet = inodes[inetIdx];
for (const T* onet : _onodes) {
if (onet->parent() != inet->parent()) continue;
inet->write(drvOfs, design, die, pitch, dir);
onet->write(snkOfs, design, die, pitch, dir);
const NetRouteUpNode& ivia = inet->upVias[0];
const NetRouteUpNode& ovia = onet->upVias[0];
wscOfs << 'S' << inetIdx * 2 << ',' << ivia[dir] << ',' << ivia[1 - dir] << ',' << inet->len() << ",O,"
<< inet->numPins() << ',' << inet->oArea() << ',' << inet->iArea() << ',';
const Point iMeanPin = inet->meanPin();
wscOfs << iMeanPin[dir] << ',' << iMeanPin[1 - dir] << ",0,0,S" << inetIdx * 2 + 1 << endl;
wscOfs << 'S' << inetIdx * 2 + 1 << ',' << ovia[dir] << ',' << ovia[1 - dir] << ',' << onet->len() << ",I,"
<< onet->numPins() << ',' << onet->oArea() << ",0,";
const Point oMeanPin = onet->meanPin();
wscOfs << oMeanPin[dir] << ',' << oMeanPin[1 - dir] << ",0,0,S" << inetIdx * 2 << endl;
break;
}
}
wscOfs.close();
drvOfs.close();
snkOfs.close();
selOfs = io::IOModule::write(path + "/" + base + ".sel.csv", true);
selOfs << "SNK_NAME,DRV_NAME,SNK_VIA_IDX,DRV_VIA_IDX,SIGNED_ABSOLUTE_DIST_X,SIGNED_ABSOLUTE_DIST_Y,UNSIGNED_"
"ABSOLUTE_DIST_X,UNSIGNED_ABSOLUTE_DIST_Y,SIGNED_RELATIVE_DIST_X,SIGNED_RELATIVE_DIST_Y,UNSIGNED_"
"RELATIVE_DIST_X,UNSIGNED_RELATIVE_DIST_Y,SNK_SINK_COUNT,DRV_SINK_COUNT,SNK_UP_PIN,DRV_UP_PIN,SNK_WL,SNK_"
"VIAS,SNK_WL_M4,";
for (unsigned i = 3; i; --i) selOfs << "SNK_VIAS_V" << i << i + 1 << ",SNK_WL_M" << i << ',';
selOfs << "DRV_WL,DRV_VIAS,DRV_WL_M4,";
for (unsigned i = 3; i; --i) selOfs << "DRV_VIAS_V" << i << i + 1 << ",DRV_WL_M" << i << ',';
selOfs << "LABEL\n";
totalNumONetsMissed = 0;
nFlows = 0;
nCoveredNets = 0;
nCoveredPins = 0;
#pragma omp parallel for
for (unsigned i = 0; i < _nONodes; ++i) {
Graph<T>::writeSel(_onodes[i], design, die, pitch, dir);
}
selOfs.close();
if (totalNumONetsMissed) {
printlog(LOG_WARN,
"%u / %u = %f sink nets missed while adding arcs",
totalNumONetsMissed,
_nONodes,
totalNumONetsMissed / static_cast<double>(_nONodes));
}
printlog(LOG_INFO,
"%u iters %u driver nets %u flows cover %u / %u = %f sink nets and %u / %u = %f sink pins",
DBModule::NumCands,
_nINodes,
nFlows,
nCoveredNets,
_nONodes,
nCoveredNets / static_cast<double>(_nONodes),
nCoveredPins,
_nOPins,
nCoveredPins / static_cast<double>(_nOPins));
#pragma omp parallel for
for (unsigned i = 0; i < _nONodes; ++i) {
Graph<T>::writeImg(_onodes[i], splitNets, image, path, dir);
}
#pragma omp parallel for
for (unsigned i = 0; i < _nINodes; ++i) {
Graph<T>::writeImg(inodes[i], splitNets, image, path, dir);
}
return true;
}
} // namespace db
#endif
|
test.c |
#include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (1024*3)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
int main(void) {
check_offloading();
double A[N], B[N], C[N], D[N], E[N];
double S[N];
double p[2];
INIT();
long cpuExec = 0;
#pragma omp target map(tofrom: cpuExec)
{
cpuExec = omp_is_initial_device();
}
int max_threads = 224;
#undef FOR_CLAUSES
#define FOR_CLAUSES
#include "defines.h"
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
PARALLEL(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i];
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], SUMS * (N/2*(N+1))))
}
//
// Test: private clause on omp for.
//
#undef FOR_CLAUSES
#define FOR_CLAUSES private(p,q)
#include "defines.h"
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
PARALLEL(
double p = 2; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p = C[i] + D[i]; \
q = D[i] + E[i]; \
A[i] += p; \
B[i] += q; \
}
,
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i];
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], 6 + SUMS * (N/2*(N+1))))
}
//
// Test: firstprivate clause on omp for.
//
#undef FOR_CLAUSES
#define FOR_CLAUSES firstprivate(p,q)
#include "defines.h"
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
PARALLEL(
double p = -4; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i] + p; \
B[i] += D[i] + E[i] + q; \
if (i == N-1) { \
p += 6; \
q += 9; \
} \
}
,
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i];
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], SUMS * (N/2*(N+1))))
}
//
// Test: lastprivate clause on omp for.
//
double q0[1], q1[1], q2[1], q3[1], q4[1], q5[1], q6[1], q7[1], q8[1], q9[1];
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
TEST({
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
_Pragma("omp parallel if(threads[0] > 1) num_threads(threads[0])")
{
_Pragma("omp for lastprivate(q0)")
for (int i = 0; i < N; i++) {
q0[0] = C[i] + D[i];
A[i] += q0[0];
}
_Pragma("omp for schedule(auto) lastprivate(q1)")
for (int i = 0; i < N; i++) {
q1[0] = D[i] + E[i];
B[i] += q1[0];
}
_Pragma("omp for schedule(dynamic) lastprivate(q2)")
for (int i = 0; i < N; i++) {
q2[0] = C[i] + D[i];
A[i] += q2[0];
}
_Pragma("omp for schedule(guided) lastprivate(q3)")
for (int i = 0; i < N; i++) {
q3[0] = D[i] + E[i];
B[i] += q3[0];
}
_Pragma("omp for schedule(runtime) lastprivate(q4)")
for (int i = 0; i < N; i++) {
q4[0] = C[i] + D[i];
A[i] += q4[0];
}
_Pragma("omp for schedule(static) lastprivate(q5)")
for (int i = 0; i < N; i++) {
q5[0] = D[i] + E[i];
B[i] += q5[0];
}
_Pragma("omp for schedule(static,1) lastprivate(q6)")
for (int i = 0; i < N; i++) {
q6[0] = C[i] + D[i];
A[i] += q6[0];
}
_Pragma("omp for schedule(static,9) lastprivate(q7)")
for (int i = 0; i < N; i++) {
q7[0] = D[i] + E[i];
B[i] += q7[0];
}
_Pragma("omp for schedule(static,13) lastprivate(q8)")
for (int i = 0; i < N; i++) {
q8[0] = C[i] + D[i];
A[i] += q8[0];
}
_Pragma("omp for schedule(static,30000) lastprivate(q9)")
for (int i = 0; i < N; i++) {
q9[0] = D[i] + E[i];
B[i] += q9[0];
}
}
double tmp = q0[0] + q1[0] + q2[0] + q3[0] + q4[0] + \
q5[0] + q6[0] + q7[0] + q8[0] + q9[0];
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i];
}
S[0] += tmp;
}, VERIFY(0, 1, S[0], 5 * (N + (N/2*(N+1))) ));
}
//
// Test: private clause on omp for.
//
#undef FOR_CLAUSES
#define FOR_CLAUSES private(p)
#include "defines.h"
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
PARALLEL(
p[0] = 2; p[1] = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p[0] = C[i] + D[i]; \
p[1] = D[i] + E[i]; \
A[i] += p[0]; \
B[i] += p[1]; \
}
,
{
double tmp = p[0] + p[1];
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i];
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], 6 + SUMS * (N/2*(N+1))))
}
//
// Test: firstprivate clause on omp for.
//
#undef FOR_CLAUSES
#define FOR_CLAUSES firstprivate(p)
#include "defines.h"
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
PARALLEL(
p[0] = -4; p[1] = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i] + p[0]; \
B[i] += D[i] + E[i] + p[1]; \
if (i == N-1) { \
p[0] += 6; \
p[1] += 9; \
} \
}
,
{
double tmp = p[0] + p[1];
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i];
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], SUMS * (N/2*(N+1))))
}
//
// Test: collapse clause on omp for.
//
#undef FOR_CLAUSES
#define FOR_CLAUSES collapse(2)
#include "defines.h"
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
PARALLEL(
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < 1024; i++) { \
for (int j = 0; j < 3; j++) { \
A[i*3+j] += C[i*3+j] + D[i*3+j]; \
B[i*3+j] += D[i*3+j] + E[i*3+j]; \
} \
}
,
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i];
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], SUMS * (N/2*(N+1))))
}
//
// Test: ordered clause on omp for.
//
#undef FOR_CLAUSES
#define FOR_CLAUSES ordered
#include "defines.h"
for (int t = 0; t <= max_threads; t += max_threads) {
int threads[1]; threads[0] = t;
PARALLEL(
S[0] = 0; \
,
for (int i = 0; i < N; i++) { \
_Pragma("omp ordered") \
S[0] += C[i] + D[i]; \
}
,
{
},
VERIFY(0, 1, S[0], SUMS * (N/2*(N+1))))
}
//
// Test: nowait clause on omp for.
// FIXME: Not sure how to test for correctness.
//
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
TEST({
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
_Pragma("omp parallel if(threads[0] > 1) num_threads(threads[0])")
{
_Pragma("omp for nowait schedule(static,1)")
for (int i = 0; i < N; i++) {
A[i] = C[i] + D[i];
}
_Pragma("omp for nowait schedule(static,1)")
for (int i = 0; i < N; i++) {
B[i] = A[i] + D[i] + E[i];
}
_Pragma("omp barrier")
if (omp_get_thread_num() == 0) {
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += B[i];
}
S[0] += tmp;
}
}
}, VERIFY(0, 1, S[0], (N/2*(N+1)) ));
}
//
// Test: Ensure coalesced scheduling on GPU.
//
if (!cpuExec) {
TESTD("omp target teams num_teams(1) thread_limit(33)", {
S[0] = 0;
for (int i = 0; i < 99; i++) {
A[i] = 0;
}
_Pragma("omp parallel num_threads(33)")
{
_Pragma("omp for")
for (int i = 0; i < 99; i++) {
A[i] += i - omp_get_thread_num();
}
_Pragma("omp for schedule(auto)")
for (int i = 0; i < 99; i++) {
A[i] += i - omp_get_thread_num();
}
_Pragma("omp for schedule(static,1)")
for (int i = 0; i < 99; i++) {
A[i] += i - omp_get_thread_num();
}
}
double tmp = 0;
for (int i = 0; i < 99; i++) {
tmp += A[i];
}
S[0] = tmp;
}, VERIFY(0, 1, S[0], 3 * (33*33 + 66*33) ));
} else {
DUMP_SUCCESS(1);
}
//
// Test: Ensure that we have barriers after dynamic, guided,
// and ordered schedules, even with a nowait clause since the
// NVPTX runtime doesn't currently support concurrent execution
// of these constructs.
// FIXME: Not sure how to test for correctness at runtime.
//
if (!cpuExec) {
TEST({
for (int i = 0; i < N; i++) {
A[i] = 0;
}
_Pragma("omp parallel")
{
_Pragma("omp for nowait schedule(guided)")
for (int i = 0; i < N; i++) {
A[i] += C[i] + D[i];
}
_Pragma("omp for nowait schedule(dynamic)")
for (int i = 0; i < N; i++) {
A[i] += D[i] + E[i];
}
_Pragma("omp for nowait ordered")
for (int i = 0; i < N; i++) {
A[i] += C[i] + D[i];
}
}
}, VERIFY(0, N, A[i], 2*i+2) );
} else {
DUMP_SUCCESS(1);
}
//
// Test: Linear clause on target
//
if (!cpuExec) {
int l = 0;
ZERO(A);
#pragma omp target map(tofrom:A)
#pragma omp parallel for linear(l:2)
for(int i = 0 ; i < 10 ; i++)
A[i] = l;
int fail = 0;
for(int i = 0 ; i < 10 ; i++)
if(A[i] != i*2) {
printf("error at %d, val = %lf expected = %d\n", i, A[i], i*2);
fail = 1;
}
if(fail)
printf("Error\n");
else
printf("Succeeded\n");
} else {
DUMP_SUCCESS(1);
}
return 0;
}
|
bml_submatrix_ellsort_typed.c | #ifdef BML_USE_MAGMA
#include "magma_v2.h"
#endif
#include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_logger.h"
#include "../bml_submatrix.h"
#include "../bml_types.h"
#include "../dense/bml_allocate_dense.h"
#include "bml_allocate_ellsort.h"
#include "bml_submatrix_ellsort.h"
#include "bml_types_ellsort.h"
#include <complex.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Determine element indices for submatrix, given a set of nodes/orbitals.
*
* \ingroup submatrix_group_C
*
* \param A Hamiltonian matrix A
* \param B Graph matrix B
 * \param nodelist List of node/orbital indices
 * \param nsize Size of nodelist
 * \param core_halo_index List of core+halo indices
* \param vsize Size of core_halo_index and number of cores
* \param double_jump_flag Flag to use double jump (0=no, 1=yes)
*/
void TYPED_FUNC(
bml_matrix2submatrix_index_ellsort) (
bml_matrix_ellsort_t * A,
bml_matrix_ellsort_t * B,
int *nodelist,
int nsize,
int *core_halo_index,
int *vsize,
int double_jump_flag)
{
int l, ll, ii, ls, k;
int A_N = A->N;
int A_M = A->M;
int *A_nnz = A->nnz;
int *A_index = A->index;
int B_N = B->N;
int B_M = B->M;
int *B_nnz = B->nnz;
int *B_index = B->index;
int ix[A_N];
memset(ix, 0, A_N * sizeof(int));
l = 0;
ll = 0;
// Cores are first followed by halos
for (int j = 0; j < nsize; j++)
{
ii = nodelist[j];
if (ix[ii] == 0)
{
ix[ii] = ii + 1;
core_halo_index[l] = ii;
l++;
ll++;
}
}
    // Collect halo indices from graph
for (int j = 0; j < nsize; j++)
{
ii = nodelist[j];
for (int jp = 0; jp < B_nnz[ii]; jp++)
{
k = B_index[ROWMAJOR(ii, jp, B_N, B_M)];
if (ix[k] == 0)
{
ix[k] = ii + 1;
core_halo_index[l] = k;
l++;
}
}
}
// Add more halo elements from H
for (int j = 0; j < nsize; j++)
{
ii = nodelist[j];
for (int jp = 0; jp < A_nnz[ii]; jp++)
{
k = A_index[ROWMAJOR(ii, jp, A_N, A_M)];
if (ix[k] == 0)
{
ix[k] = ii + 1;
core_halo_index[l] = k;
l++;
}
}
}
// Perform a "double jump" for extra halo elements
// based on graph, like performing a symbolic X^2
if (double_jump_flag == 1)
{
ls = l;
for (int j = 0; j < ls; j++)
{
ii = core_halo_index[j];
for (int jp = 0; jp < B_nnz[ii]; jp++)
{
k = B_index[ROWMAJOR(ii, jp, B_N, B_M)];
if (ix[k] == 0)
{
ix[k] = ii + 1;
core_halo_index[l] = k;
l++;
}
}
}
}
vsize[0] = l;
vsize[1] = ll;
}
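/* Usage sketch (hypothetical sizes; A and B are compatible N-by-N ellsort
 * matrices, and an untyped dispatcher with this name is assumed, as in the
 * rest of bml):
 *
 *     int nodelist[2] = { 0, 5 };
 *     int *core_halo_index = malloc(sizeof(int) * N);   // worst case: all rows
 *     int vsize[2];
 *     bml_matrix2submatrix_index_ellsort(A, B, nodelist, 2,
 *                                        core_halo_index, vsize, 1);
 *     // vsize[1] core rows come first in core_halo_index, followed by
 *     // vsize[0] - vsize[1] halo rows.
 */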
/** Determine element indices for submatrix, given a set of nodes/orbitals.
*
* \ingroup submatrix_group_C
*
* \param B Graph matrix B
 * \param nodelist List of node/orbital indices
 * \param nsize Size of nodelist
 * \param core_halo_index List of core+halo indices
* \param vsize Size of core_halo_index and number of cores
* \param double_jump_flag Flag to use double jump (0=no, 1=yes)
*/
void TYPED_FUNC(
bml_matrix2submatrix_index_graph_ellsort) (
bml_matrix_ellsort_t * B,
int *nodelist,
int nsize,
int *core_halo_index,
int *vsize,
int double_jump_flag)
{
int l, ll, ii, ls, k;
int B_N = B->N;
int B_M = B->M;
int *B_index = B->index;
int *B_nnz = B->nnz;
int ix[B_N];
memset(ix, 0, B_N * sizeof(int));
l = 0;
ll = 0;
// Cores are first followed by halos
for (int j = 0; j < nsize; j++)
{
ii = nodelist[j];
if (ix[ii] == 0)
{
ix[ii] = ii + 1;
core_halo_index[l] = ii;
l++;
ll++;
}
}
    // Collect halo indices from graph
for (int j = 0; j < nsize; j++)
{
ii = nodelist[j];
for (int jp = 0; jp < B_nnz[ii]; jp++)
{
k = B_index[ROWMAJOR(ii, jp, B_N, B_M)];
if (ix[k] == 0)
{
ix[k] = ii + 1;
core_halo_index[l] = k;
l++;
}
}
}
// Use graph for double jumps
if (double_jump_flag == 1)
{
ls = l;
for (int j = 0; j < ls; j++)
{
ii = core_halo_index[j];
for (int jp = 0; jp < B_nnz[ii]; jp++)
{
k = B_index[ROWMAJOR(ii, jp, B_N, B_M)];
if (ix[k] == 0)
{
ix[k] = ii + 1;
core_halo_index[l] = k;
l++;
}
}
}
}
vsize[0] = l;
vsize[1] = ll;
}
/** Extract a submatrix from a matrix given a set of core+halo rows.
*
* \ingroup submatrix_group_C
*
* \param A Matrix A
* \param B Submatrix B
 * \param core_halo_index Set of row indices for submatrix
 * \param lsize Number of indices
*/
void TYPED_FUNC(
bml_matrix2submatrix_ellsort) (
bml_matrix_ellsort_t * A,
bml_matrix_dense_t * B,
int *core_halo_index,
int lsize)
{
REAL_T *rvalue;
int B_N = B->N;
#ifdef BML_USE_MAGMA
REAL_T *B_matrix = bml_allocate_memory(sizeof(REAL_T) * B->N * B->N);
#else
REAL_T *B_matrix = B->matrix;
#endif
#pragma omp parallel for \
private(rvalue) \
shared(core_halo_index) \
shared(A, B_matrix, B_N)
for (int jb = 0; jb < lsize; jb++)
{
rvalue = TYPED_FUNC(bml_getVector_ellsort) (A, core_halo_index,
core_halo_index[jb],
lsize);
for (int j = 0; j < lsize; j++)
{
B_matrix[ROWMAJOR(jb, j, B_N, B_N)] = rvalue[j];
}
free(rvalue);
}
#ifdef BML_USE_MAGMA
MAGMA(setmatrix) (B_N, B_N, (MAGMA_T *) B_matrix, B_N,
B->matrix, B->ld, B->queue);
free(B_matrix);
#endif
}
/** Assemble submatrix into a full matrix based on core+halo indices.
*
* \ingroup submatrix_group_C
*
* \param A Submatrix A
* \param B Matrix B
 * \param core_halo_index Set of submatrix row indices
 * \param lsize Number of indices
 * \param llsize Number of core positions
 * \param threshold Drop tolerance: only entries with magnitude above it are kept
*/
void TYPED_FUNC(
bml_submatrix2matrix_ellsort) (
bml_matrix_dense_t * A,
bml_matrix_ellsort_t * B,
int *core_halo_index,
int lsize,
int llsize,
double threshold)
{
int A_N = A->N;
#ifdef BML_USE_MAGMA
REAL_T *A_matrix = bml_allocate_memory(sizeof(REAL_T) * A->N * A->N);
MAGMA(getmatrix) (A->N, A->N,
A->matrix, A->ld, (MAGMA_T *) A_matrix, A->N, A->queue);
#else
REAL_T *A_matrix = A->matrix;
#endif
int B_N = B->N;
int B_M = B->M;
int *B_nnz = B->nnz;
int *B_index = B->index;
REAL_T *B_value = B->value;
int ii, icol;
#pragma omp parallel for \
private(ii, icol) \
shared(core_halo_index) \
shared(A_N, A_matrix) \
shared(B_N, B_M, B_nnz, B_index, B_value)
for (int ja = 0; ja < llsize; ja++)
{
ii = core_halo_index[ja];
icol = 0;
for (int jb = 0; jb < lsize; jb++)
{
if (ABS(A_matrix[ROWMAJOR(ja, jb, A_N, A_N)]) > threshold)
{
B_index[ROWMAJOR(ii, icol, B_N, B_M)] = core_halo_index[jb];
B_value[ROWMAJOR(ii, icol, B_N, B_M)] =
A_matrix[ROWMAJOR(ja, jb, A_N, A_N)];
icol++;
}
}
if (icol > B_M)
{
LOG_ERROR("Number of non-zeroes per row >= M, Increase M\n");
}
B_nnz[ii] = icol;
}
#ifdef BML_USE_MAGMA
free(A_matrix);
#endif
}
// Get matching vector of values
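// For row irow of A, returns a freshly allocated dense array of length colCnt
// whose i-th entry is A(irow, jj[i]) if that column is stored in the row and
// zero otherwise; the caller owns, and must free, the result.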
void *TYPED_FUNC(
bml_getVector_ellsort) (
bml_matrix_ellsort_t * A,
int *jj,
int irow,
int colCnt)
{
REAL_T ZERO = 0.0;
int A_N = A->N;
int A_M = A->M;
int *A_nnz = A->nnz;
int *A_index = A->index;
REAL_T *A_value = A->value;
REAL_T *rvalue = bml_noinit_allocate_memory(colCnt * sizeof(REAL_T));
    for (int i = 0; i < colCnt; i++)
    {
        // Default to zero so entries absent from the row (or an empty row)
        // are well defined, then overwrite on a column match.
        rvalue[i] = ZERO;
        for (int j = 0; j < A_nnz[irow]; j++)
        {
            if (A_index[ROWMAJOR(irow, j, A_N, A_M)] == jj[i])
            {
                rvalue[i] = A_value[ROWMAJOR(irow, j, A_N, A_M)];
                break;
            }
        }
    }
return rvalue;
}
/** Assemble matrix based on groups of rows from a matrix.
*
* \ingroup submatrix_group_C
*
* \param A Matrix A
 * \param hindex Indices of nodes
* \param ngroups Number of groups
* \param threshold Threshold for graph
*/
bml_matrix_ellsort_t *TYPED_FUNC(
bml_group_matrix_ellsort) (
bml_matrix_ellsort_t * A,
int *hindex,
int ngroups,
double threshold)
{
int A_N = A->N;
int A_M = A->M;
int *A_index = A->index;
int *A_nnz = A->nnz;
REAL_T *A_value = A->value;
#if !(defined(__IBMC_) || defined(__ibmxl__))
int ix[ngroups];
memset(ix, 0, sizeof(int) * ngroups);
#endif
int hnode[A_N];
int hend;
bml_matrix_dimension_t matrix_dimension = { ngroups, ngroups, ngroups };
bml_matrix_ellsort_t *B =
TYPED_FUNC(bml_noinit_matrix_ellsort) (matrix_dimension,
A->distribution_mode);
int B_N = B->N;
int B_M = B->M;
int *B_index = B->index;
int *B_nnz = B->nnz;
REAL_T *B_value = B->value;
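    // hindex appears to hold 1-based group offsets: group i owns rows
    // hindex[i]-1 .. hindex[i+1]-2, with the last group extended to row A_N-1.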
#pragma omp parallel for \
private(hend) \
shared(hindex, hnode, A_N)
for (int i = 0; i < ngroups; i++)
{
hend = hindex[i + 1] - 1;
if (i == ngroups - 1)
hend = A_N;
for (int j = hindex[i] - 1; j < hend; j++)
{
hnode[j] = i;
}
}
#if defined(__IBMC_) || defined(__ibmxl__)
#pragma omp parallel for \
private(hend) \
shared(hindex, hnode) \
shared(A_nnz, A_index, A_value, A_N, A_M) \
shared(B_nnz, B_index, B_value, B_N, B_M)
#else
#pragma omp parallel for \
private(hend) \
shared(hindex, hnode) \
shared(A_nnz, A_index, A_value, A_N, A_M) \
shared(B_nnz, B_index, B_value, B_N, B_M) \
firstprivate(ix)
#endif
for (int i = 0; i < B_N; i++)
{
#if defined(__IBMC_) || defined(__ibmxl__)
int ix[ngroups];
memset(ix, 0, sizeof(int) * ngroups);
#endif
B_nnz[i] = 0;
hend = hindex[i + 1] - 1;
if (i == B_N - 1)
hend = A_N;
for (int j = hindex[i] - 1; j < hend; j++)
{
for (int k = 0; k < A_nnz[j]; k++)
{
int ii = hnode[A_index[ROWMAJOR(j, k, A_N, A_M)]];
if (ix[ii] == 0 &&
is_above_threshold(A_value[ROWMAJOR(j, k, A_N, A_M)],
threshold))
{
ix[ii] = i + 1;
B_index[ROWMAJOR(i, B_nnz[i], B_N, B_M)] = ii;
B_value[ROWMAJOR(i, B_nnz[i], B_N, B_M)] = 1.0;
B_nnz[i]++;
}
}
}
}
return B;
}
|
dsyr2k.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zsyr2k.c, normal z -> d, Fri Sep 28 17:38:03 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_syr2k
*
* Performs one of the symmetric rank 2k operations
*
* \f[ C = \alpha A \times B^T + \alpha B \times A^T + \beta C, \f]
* or
* \f[ C = \alpha A^T \times B + \alpha B^T \times A + \beta C, \f]
*
* where alpha and beta are scalars,
* C is an n-by-n symmetric matrix, and A and B are n-by-k matrices
* in the first case and k-by-n matrices in the second case.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans:
* \f[ C = \alpha A \times B^T + \alpha B \times A^T + \beta C; \f]
* - PlasmaTrans:
* \f[ C = \alpha A^T \times B + \alpha B^T \times A + \beta C. \f]
*
* @param[in] n
 *          The order of the matrix C. n >= 0.
*
* @param[in] k
* If trans = PlasmaNoTrans, number of columns of the A and B matrices;
* if trans = PlasmaTrans, number of rows of the A and B matrices.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* An lda-by-ka matrix.
* If trans = PlasmaNoTrans, ka = k;
* if trans = PlasmaTrans, ka = n.
*
* @param[in] lda
* The leading dimension of the array A.
* If trans = PlasmaNoTrans, lda >= max(1, n);
* if trans = PlasmaTrans, lda >= max(1, k).
*
* @param[in] pB
* An ldb-by-kb matrix.
* If trans = PlasmaNoTrans, kb = k;
* if trans = PlasmaTrans, kb = n.
*
* @param[in] ldb
* The leading dimension of the array B.
* If trans = PlasmaNoTrans, ldb >= max(1, n);
* if trans = PlasmaTrans, ldb >= max(1, k).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] pC
* An ldc-by-n matrix.
* On exit, the uplo part of the matrix is overwritten
* by the uplo part of the updated matrix.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1, n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_dsyr2k
* @sa plasma_csyr2k
* @sa plasma_dsyr2k
* @sa plasma_ssyr2k
*
******************************************************************************/
int plasma_dsyr2k(plasma_enum_t uplo, plasma_enum_t trans,
int n, int k,
double alpha, double *pA, int lda,
double *pB, int ldb,
double beta, double *pC, int ldc)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if ((uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
return -1;
}
if ((trans != PlasmaNoTrans) &&
(trans != PlasmaTrans)) {
plasma_error("illegal value of trans");
return -2;
}
if (n < 0) {
plasma_error("illegal value of n");
return -3;
}
if (k < 0) {
plasma_error("illegal value of k");
return -4;
}
int am, an;
int bm, bn;
if (trans == PlasmaNoTrans) {
am = n;
an = k;
bm = n;
bn = k;
}
else {
am = k;
an = n;
bm = k;
bn = n;
}
if (lda < imax(1, am)) {
plasma_error("illegal value of lda");
return -7;
}
if (ldb < imax(1, bm)) {
plasma_error("illegal value of ldb");
return -9;
}
if (ldc < imax(1, n)) {
plasma_error("illegal value of ldc");
return -12;
}
// quick return
    if (n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_syr2k(plasma, PlasmaRealDouble, n, k);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t A;
plasma_desc_t B;
plasma_desc_t C;
int retval;
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
am, an, 0, 0, am, an, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
bm, bn, 0, 0, bm, bn, &B);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
return retval;
}
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
n, n, 0, 0, n, n, &C);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
return retval;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
plasma_omp_dge2desc(pB, ldb, B, &sequence, &request);
plasma_omp_dge2desc(pC, ldc, C, &sequence, &request);
// Call the tile async function.
plasma_omp_dsyr2k(uplo, trans,
alpha, A,
B,
beta, C,
&sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_ddesc2ge(C, pC, ldc, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
plasma_desc_destroy(&C);
// Return status.
int status = sequence.status;
return status;
}
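/* Usage sketch (hypothetical sizes; assumes plasma_init() has already been
 * called and the arrays are filled by the caller):
 *
 *     int n = 1000, k = 200;
 *     double *A = malloc((size_t)n * k * sizeof(double));   // n-by-k, lda = n
 *     double *B = malloc((size_t)n * k * sizeof(double));   // n-by-k, ldb = n
 *     double *C = malloc((size_t)n * n * sizeof(double));   // n-by-n, ldc = n
 *     int info = plasma_dsyr2k(PlasmaLower, PlasmaNoTrans, n, k,
 *                              1.0, A, n, B, n, 0.5, C, n);
 *     // info == PlasmaSuccess on success; only the lower triangle of C
 *     // is referenced and updated.
 */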
/***************************************************************************//**
*
* @ingroup plasma_syr2k
*
* Performs rank 2k update.
* Non-blocking tile version of plasma_dsyr2k().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans:
* \f[ C = \alpha A \times B^T + \alpha B \times A^T + \beta C; \f]
* - PlasmaTrans:
* \f[ C = \alpha A^T \times B + \alpha B^T \times A + \beta C. \f]
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
 * @param[in] B
* Descriptor of matrix B.
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* Descriptor of matrix C.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dsyr2k
* @sa plasma_omp_dsyr2k
* @sa plasma_omp_csyr2k
*
******************************************************************************/
void plasma_omp_dsyr2k(plasma_enum_t uplo, plasma_enum_t trans,
double alpha, plasma_desc_t A,
plasma_desc_t B,
double beta, plasma_desc_t C,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if ((trans != PlasmaNoTrans) && (trans != PlasmaTrans)) {
plasma_error("illegal value of trans");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
plasma_error("invalid A");
return;
}
if (plasma_desc_check(B) != PlasmaSuccess) {
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
plasma_error("invalid B");
return;
}
if (plasma_desc_check(C) != PlasmaSuccess) {
plasma_error("invalid C");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (sequence == NULL) {
plasma_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
int k = trans == PlasmaNoTrans ? A.n : A.m;
if (C.m == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
return;
// Call the parallel function.
plasma_pdsyr2k(uplo, trans,
alpha, A,
B,
beta, C,
sequence, request);
}
|
normal.c | /* =============================================================================
*
* normal.c
* -- Implementation of normal k-means clustering algorithm
*
* =============================================================================
*
* Author:
*
* Wei-keng Liao
* ECE Department, Northwestern University
* email: wkliao@ece.northwestern.edu
*
*
* Edited by:
*
* Jay Pisharath
* Northwestern University.
*
* Chi Cao Minh
* Stanford University
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <math.h>
#include "common.h"
#include "normal.h"
#include "random.h"
#include "thread.h"
#include "timer.h"
#include "tm.h"
#include "util.h"
double global_time = 0.0;
typedef struct args {
float** feature;
int nfeatures;
int npoints;
int nclusters;
int* membership;
float** clusters;
int** new_centers_len;
float** new_centers;
} args_t;
float global_delta;
int global_i; /* index into task queue */
#define CHUNK 3
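/* Threads self-schedule over the npoints objects in CHUNK-sized pieces:
 * thread t starts at t * CHUNK, and when a chunk is exhausted the next start
 * index is fetched from the shared counter global_i inside a transaction. */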
/* =============================================================================
* work
* =============================================================================
*/
static void
work (void* argPtr)
{
TM_THREAD_ENTER();
args_t* args = (args_t*)argPtr;
float** feature = args->feature;
int nfeatures = args->nfeatures;
int npoints = args->npoints;
int nclusters = args->nclusters;
int* membership = args->membership;
float** clusters = args->clusters;
int** new_centers_len = args->new_centers_len;
float** new_centers = args->new_centers;
float delta = 0.0;
int index;
int i;
int j;
int start;
int stop;
int myId;
myId = thread_getId();
start = myId * CHUNK;
while (start < npoints) {
stop = (((start + CHUNK) < npoints) ? (start + CHUNK) : npoints);
for (i = start; i < stop; i++) {
index = common_findNearestPoint(feature[i],
nfeatures,
clusters,
nclusters);
/*
* If membership changes, increase delta by 1.
* membership[i] cannot be changed by other threads
*/
if (membership[i] != index) {
delta += 1.0;
}
/* Assign the membership to object i */
/* membership[i] can't be changed by other thread */
membership[i] = index;
/* Update new cluster centers : sum of objects located within */
TM_BEGIN();
TM_SHARED_WRITE(*new_centers_len[index],
TM_SHARED_READ(*new_centers_len[index]) + 1);
for (j = 0; j < nfeatures; j++) {
TM_SHARED_WRITE_F(
new_centers[index][j],
(TM_SHARED_READ_F(new_centers[index][j]) + feature[i][j])
);
}
TM_END();
}
/* Update task queue */
if (start + CHUNK < npoints) {
TM_BEGIN();
start = (int)TM_SHARED_READ(global_i);
TM_SHARED_WRITE(global_i, (start + CHUNK));
TM_END();
} else {
break;
}
}
TM_BEGIN();
TM_SHARED_WRITE_F(global_delta, TM_SHARED_READ_F(global_delta) + delta);
TM_END();
TM_THREAD_EXIT();
}
/* =============================================================================
* normal_exec
* =============================================================================
*/
float**
normal_exec (int nthreads,
float** feature, /* in: [npoints][nfeatures] */
int nfeatures,
int npoints,
int nclusters,
float threshold,
int* membership,
random_t* randomPtr) /* out: [npoints] */
{
int i;
int j;
int loop = 0;
int** new_centers_len; /* [nclusters]: no. of points in each cluster */
float delta;
float** clusters; /* out: [nclusters][nfeatures] */
float** new_centers; /* [nclusters][nfeatures] */
void* alloc_memory = NULL;
args_t args;
TIMER_T start;
TIMER_T stop;
/* Allocate space for returning variable clusters[] */
clusters = (float**)malloc(nclusters * sizeof(float*));
assert(clusters);
clusters[0] = (float*)malloc(nclusters * nfeatures * sizeof(float));
assert(clusters[0]);
for (i = 1; i < nclusters; i++) {
clusters[i] = clusters[i-1] + nfeatures;
}
/* Randomly pick cluster centers */
for (i = 0; i < nclusters; i++) {
int n = (int)(random_generate(randomPtr) % npoints);
for (j = 0; j < nfeatures; j++) {
clusters[i][j] = feature[n][j];
}
}
for (i = 0; i < npoints; i++) {
membership[i] = -1;
}
/*
* Need to initialize new_centers_len and new_centers[0] to all 0.
* Allocate clusters on different cache lines to reduce false sharing.
*/
{
int cluster_size = sizeof(int) + sizeof(float) * nfeatures;
const int cacheLineSize = 32;
cluster_size += (cacheLineSize-1) - ((cluster_size-1) % cacheLineSize);
alloc_memory = calloc(nclusters, cluster_size);
new_centers_len = (int**) malloc(nclusters * sizeof(int*));
new_centers = (float**) malloc(nclusters * sizeof(float*));
assert(alloc_memory && new_centers && new_centers_len);
for (i = 0; i < nclusters; i++) {
new_centers_len[i] = (int*)((char*)alloc_memory + cluster_size * i);
new_centers[i] = (float*)((char*)alloc_memory + cluster_size * i + sizeof(int));
}
}
TIMER_READ(start);
GOTO_SIM();
OSA_PRINT("entering parallel phase\n",0);
START_INSTRUMENTATION();
do {
delta = 0.0;
args.feature = feature;
args.nfeatures = nfeatures;
args.npoints = npoints;
args.nclusters = nclusters;
args.membership = membership;
args.clusters = clusters;
args.new_centers_len = new_centers_len;
args.new_centers = new_centers;
global_i = nthreads * CHUNK;
global_delta = delta;
#ifdef OTM
#pragma omp parallel
{
work(&args);
}
#else
thread_start(work, &args);
#endif
delta = global_delta;
/* Replace old cluster centers with new_centers */
for (i = 0; i < nclusters; i++) {
for (j = 0; j < nfeatures; j++) {
                if (*new_centers_len[i] > 0) {
clusters[i][j] = new_centers[i][j] / *new_centers_len[i];
}
new_centers[i][j] = 0.0; /* set back to 0 */
}
*new_centers_len[i] = 0; /* set back to 0 */
}
delta /= npoints;
} while ((delta > threshold) && (loop++ < 500));
OSA_PRINT("exiting parallel phase\n",0);
OSA_PRINT("END BENCHMARK kmeans-parallel-phase",0);
STOP_INSTRUMENTATION();
GOTO_REAL();
TIMER_READ(stop);
global_time += TIMER_DIFF_SECONDS(start, stop);
free(alloc_memory);
free(new_centers);
free(new_centers_len);
return clusters;
}
/* =============================================================================
*
* End of normal.c
*
* =============================================================================
*/
|
reduc.c | // Load the OpenMP functions library
#include <omp.h>
#include <math.h>  // for the pow function
#include <stdio.h> // for printf
int main()
{
    // Set and initialise variables
    int broad = 0, tnum = 0, calc = 0;
    // Start parallel block
    #pragma omp parallel private(broad, tnum, calc)
    {
        // We want a single thread to broadcast a value to the entire block of
        // threads.
        #pragma omp single copyprivate(broad)
        {
            broad = omp_get_thread_num();
        }
        // Print out the broadcast value
        printf("broad = %d\n", broad);
    }
    calc = 0;
    // Each thread in this block will perform a calculation based on its thread
    // number. These results are then reduced by performing an action on them, in
    // this case a summation.
    #pragma omp parallel private(tnum) reduction(+:calc)
    {
        tnum = omp_get_thread_num();
        calc = pow(2, tnum);
        printf("Thread number = %d calc = %d\n", tnum, calc);
    }
    printf("Reduction = %d\n", calc);
    return 0;
}
|
core_zttlqt.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c d s
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
#include <omp.h>
// This will be swapped during the automatic code generation.
#undef REAL
#define COMPLEX
/***************************************************************************//**
*
* @ingroup core_ttlqt
*
* Computes an LQ factorization of a rectangular matrix
* formed by coupling side-by-side an m-by-m lower triangular tile A1
* and an m-by-n lower triangular tile A2:
*
* | A1 A2 | = L * Q
*
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the tile A1 and A2. m >= 0.
* The number of columns of the tile A1.
*
* @param[in] n
* The number of columns of the tile A2. n >= 0.
*
* @param[in] ib
* The inner-blocking size. ib >= 0.
*
* @param[in,out] A1
* On entry, the m-by-m tile A1.
* On exit, the elements on and below the diagonal of the array
* contain the m-by-m lower trapezoidal tile L;
* the elements above the diagonal are not referenced.
*
* @param[in] lda1
* The leading dimension of the array A1. lda1 >= max(1,m).
*
* @param[in,out] A2
* On entry, the m-by-n lower triangular tile A2.
* On exit, the elements on and below the diagonal of the array
 *          together with the matrix T, represent
* the unitary tile Q as a product of elementary reflectors.
*
* @param[in] lda2
* The leading dimension of the array A2. lda2 >= max(1,m).
*
* @param[out] T
* The ib-by-m triangular factor T of the block reflector.
* T is upper triangular by block (economic storage);
* The rest of the array is not referenced.
*
* @param[in] ldt
* The leading dimension of the array T. ldt >= ib.
*
* @param tau
* Auxiliary workspace array of length m.
*
* @param work
* Auxiliary workspace array of length ib*m.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
__attribute__((weak))
int plasma_core_zttlqt(int m, int n, int ib,
plasma_complex64_t *A1, int lda1,
plasma_complex64_t *A2, int lda2,
plasma_complex64_t *T, int ldt,
plasma_complex64_t *tau,
plasma_complex64_t *work)
{
// Check input arguments.
if (m < 0) {
plasma_coreblas_error("illegal value of m");
return -1;
}
if (n < 0) {
plasma_coreblas_error("illegal value of n");
return -2;
}
if (ib < 0) {
plasma_coreblas_error("illegal value of ib");
return -3;
}
if (A1 == NULL) {
plasma_coreblas_error("NULL A1");
return -4;
}
if (lda1 < imax(1, m) && m > 0) {
plasma_coreblas_error("illegal value of lda1");
return -5;
}
if (A2 == NULL) {
plasma_coreblas_error("NULL A2");
return -6;
}
if (lda2 < imax(1, m) && m > 0) {
plasma_coreblas_error("illegal value of lda2");
return -7;
}
if (T == NULL) {
plasma_coreblas_error("NULL T");
return -8;
}
if (ldt < imax(1, ib) && ib > 0) {
plasma_coreblas_error("illegal value of ldt");
return -9;
}
if (tau == NULL) {
plasma_coreblas_error("NULL tau");
return -10;
}
if (work == NULL) {
plasma_coreblas_error("NULL work");
return -11;
}
// quick return
if ((m == 0) || (n == 0) || (ib == 0))
return PlasmaSuccess;
// TODO: Need to check why some cases require this to avoid
// uninitialized values
//core_zlaset(PlasmaGeneral, ib, m, 0.0, 0.0, T, ldt);
for (int ii = 0; ii < m; ii += ib) {
int sb = imin(m-ii, ib);
for (int i = 0; i < sb; i++) {
int j = ii + i;
int mi = sb-i-1;
int ni = imin( j + 1, n);
// Generate elementary reflector H(ii*ib+i) to annihilate
// A(ii*ib+i, ii*ib+i:m).
#ifdef COMPLEX
LAPACKE_zlacgv_work(ni, &A2[j], lda2);
LAPACKE_zlacgv_work(1, &A1[lda1*j+j], lda1);
#endif
LAPACKE_zlarfg_work(ni+1, &A1[lda1*j+j], &A2[j], lda2, &tau[j]);
plasma_complex64_t alpha;
if (mi > 0) {
// Apply H(j-1) to A(j:ii+ib-1, j-1:m) from the right.
cblas_zcopy(
mi,
&A1[lda1*j+(j+1)], 1,
work, 1);
plasma_complex64_t zone = 1.0;
cblas_zgemv(
CblasColMajor, (CBLAS_TRANSPOSE)PlasmaNoTrans,
mi, ni,
CBLAS_SADDR(zone), &A2[j+1], lda2,
&A2[j], lda2,
CBLAS_SADDR(zone), work, 1);
alpha = -(tau[j]);
cblas_zaxpy(
mi, CBLAS_SADDR(alpha),
work, 1,
&A1[lda1*j+j+1], 1);
cblas_zgerc(
CblasColMajor, mi, ni,
CBLAS_SADDR(alpha), work, 1,
&A2[j], lda2,
&A2[j+1], lda2);
}
// Calculate T.
if (i > 0 ) {
int l = imin(i, imax(0, n-ii));
alpha = -(tau[j]);
plasma_core_zpemv(
PlasmaNoTrans, PlasmaRowwise,
i, imin(j, n), l,
alpha, &A2[ii], lda2,
&A2[j], lda2,
0.0, &T[ldt*j], 1,
work);
// T(0:i-1, j) = T(0:i-1, ii:j-1) * T(0:i-1, j)
cblas_ztrmv(
CblasColMajor, (CBLAS_UPLO)PlasmaUpper,
(CBLAS_TRANSPOSE)PlasmaNoTrans,
(CBLAS_DIAG)PlasmaNonUnit,
i, &T[ldt*ii], ldt,
&T[ldt*j], 1);
}
#ifdef COMPLEX
LAPACKE_zlacgv_work(ni, &A2[j], lda2 );
LAPACKE_zlacgv_work(1, &A1[lda1*j+j], lda1 );
#endif
T[ldt*j+i] = tau[j];
}
// Apply Q to the rest of the matrix to the right.
if (m > ii+sb) {
int mi = m-(ii+sb);
int ni = imin(ii+sb, n);
int l = imin(sb, imax(0, ni-ii));
plasma_core_zparfb(
PlasmaRight, PlasmaNoTrans,
PlasmaForward, PlasmaRowwise,
mi, ib, mi, ni, sb, l,
&A1[lda1*ii+ii+sb], lda1,
&A2[ii+sb], lda2,
&A2[ii], lda2,
&T[ldt*ii], ldt,
work, m);
}
}
return PlasmaSuccess;
}
/******************************************************************************/
void plasma_core_omp_zttlqt(int m, int n, int ib,
plasma_complex64_t *A1, int lda1,
plasma_complex64_t *A2, int lda2,
plasma_complex64_t *T, int ldt,
plasma_workspace_t work,
plasma_sequence_t *sequence, plasma_request_t *request)
{
#pragma omp task depend(inout:A1[0:lda1*m]) \
depend(inout:A2[0:lda2*n]) \
depend(out:T[0:ib*m]) // T should be mxib, but is stored
// as ibxm
{
if (sequence->status == PlasmaSuccess) {
// Prepare workspaces.
int tid = omp_get_thread_num();
plasma_complex64_t *tau = ((plasma_complex64_t*)work.spaces[tid]);
// Call the kernel.
int info = plasma_core_zttlqt(m, n, ib,
A1, lda1,
A2, lda2,
T, ldt,
tau,
tau+m);
if (info != PlasmaSuccess) {
plasma_error("core_ztslqt() failed");
plasma_request_fail(sequence, request, PlasmaErrorInternal);
}
}
}
}
|
kmeans.c | /** @file kmeans.c
** @brief K-means - Declaration
** @author Andrea Vedaldi, David Novotny
**/
/*
Copyright (C) 2007-12 Andrea Vedaldi and Brian Fulkerson.
Copyright (C) 2013 Andrea Vedaldi and David Novotny.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page kmeans K-means clustering
@author Andrea Vedaldi
@author David Novotny
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@ref kmeans.h implements a number of algorithms for **K-means
quantization**: Lloyd @cite{lloyd82least}, an accelerated version by
Elkan @cite{elkan03using}, and a large scale algorithm based on
Approximate Nearest Neighbors (ANN). All algorithms support @c float
or @c double data and can use the $l^1$ or the $l^2$ distance for
clustering. Furthermore, all algorithms can take advantage of multiple
CPU cores.
Please see @subpage kmeans-fundamentals for a technical description of
K-means and of the algorithms implemented here.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-starting Getting started
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The goal of K-means is to partition a dataset into $K$
“compact” clusters. The following example demonstrates
using @ref kmeans.h in the C programming language to partition @c
numData @c float vectors into @c numCenters clusters using
Lloyd's algorithm:
@code
#include <vl/kmeans.h>
double energy ;
float const * centers ;
// Use float data and the L2 distance for clustering
VlKMeans * kmeans = vl_kmeans_new (VL_TYPE_FLOAT, VlDistanceL2) ;
// Use Lloyd algorithm
vl_kmeans_set_algorithm (kmeans, VlKMeansLloyd) ;
// Initialize the cluster centers by randomly sampling the data
vl_kmeans_init_centers_with_rand_data (kmeans, data, dimension, numData, numCenters) ;
// Run at most 100 iterations of cluster refinement using Lloyd algorithm
vl_kmeans_set_max_num_iterations (kmeans, 100) ;
vl_kmeans_refine_centers (kmeans, data, numData) ;
// Obtain the energy of the solution
energy = vl_kmeans_get_energy(kmeans) ;
// Obtain the cluster centers
centers = vl_kmeans_get_centers(kmeans) ;
@endcode
Once the centers have been obtained, new data points can be assigned
to clusters by using the ::vl_kmeans_quantize function:
@code
vl_uint32 * assignments = vl_malloc(sizeof(vl_uint32) * numData) ;
float * distances = vl_malloc(sizeof(float) * numData) ;
vl_kmeans_quantize(kmeans, assignments, distances, data, numData) ;
@endcode
Alternatively, one can directly assign new data points to the closest
centers, without going through a ::VlKMeans object, as sketched below.
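A minimal sketch of this direct route, reusing the variables from the
example above together with the vector comparison facilities of @ref
mathop.h (the argmin scan over each column is left out):

@code
float * distanceMatrix = vl_malloc(sizeof(float) * numCenters * numData) ;
VlFloatVectorComparisonFunction distFn =
  vl_get_vector_comparison_function_f(VlDistanceL2) ;
// distanceMatrix[c + x * numCenters] is the distance of point x
// to center c; the assignment of x is the argmin over c
vl_eval_vector_comparison_on_all_pairs_f (distanceMatrix, dimension,
                                          centers, numCenters,
                                          data, numData,
                                          distFn) ;
@endcode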
There are several considerations that may impact the performance of
KMeans. First, since K-means is usually based on a local optimization
algorithm, the **initialization method** is important. The following
initialization methods are supported:
Method | Function | Description
---------------|-----------------------------------------|-----------------------------------------------
Random samples | ::vl_kmeans_init_centers_with_rand_data | Random data points
K-means++ | ::vl_kmeans_init_centers_plus_plus | Random selection biased towards diversity
Custom | ::vl_kmeans_set_centers | Choose centers (useful to run quantization only)
See @ref kmeans-init for further details. The initialization methods
use a randomized selection of the data points; the random number
generator is initialized by ::vl_rand_init.
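For instance, a minimal sketch making the initialization reproducible,
assuming ::vl_rand_seed and ::vl_get_rand from the random number
facilities:

@code
// seed the default random generator before sampling the centers
vl_rand_seed (vl_get_rand(), 1) ;
vl_kmeans_init_centers_with_rand_data (kmeans, data, dimension, numData, numCenters) ;
@endcode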
The second important choice is the **optimization algorithm**. The
following optimization algorithms are supported:
Algorithm | Symbol | See | Description
------------|------------------|-------------------|-----------------------------------------------
Lloyd | ::VlKMeansLloyd | @ref kmeans-lloyd | Alternate EM-style optimization
Elkan | ::VlKMeansElkan | @ref kmeans-elkan | A speedup using triangular inequalities
ANN | ::VlKMeansANN | @ref kmeans-ann | A speedup using approximated nearest neighbors
See the respective sections for further details. These algorithms are
iterative, and stop when either a **maximum number of iterations**
(::vl_kmeans_set_max_num_iterations) is reached, or when the energy
changes sufficiently slowly in one iteration (::vl_kmeans_set_min_energy_variation).
All three algorithms support multithreaded computation. The number
of threads used is usually controlled globally by ::vl_set_num_threads.
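For instance, a minimal sketch switching the first example to Elkan's
algorithm with explicit termination criteria:

@code
vl_kmeans_set_algorithm (kmeans, VlKMeansElkan) ;
vl_kmeans_set_max_num_iterations (kmeans, 100) ;    // hard iteration cap
vl_kmeans_set_min_energy_variation (kmeans, 1e-4) ; // relative energy tolerance
vl_kmeans_refine_centers (kmeans, data, numData) ;
@endcode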
**/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page kmeans-fundamentals K-means fundamentals
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
Given $n$ points $\bx_1,\dots,\bx_n \in \real^d$, the goal of K-means
is to find $K$ `centers` $\bc_1,\dots,\bc_K \in \real^d$ and
`assignments` $q_1,\dots,q_n \in \{1,\dots,K\}$ of the points to the
centers such that the sum of distances
\[
E(\bc_1,\dots,\bc_K,q_1,\dots,q_n)
= \sum_{i=1}^n \|\bx_i - \bc_{q_i} \|_p^p
\]
is minimized. $K$-means is obtained for the case $p=2$ ($l^2$ norm),
because in this case the optimal centers are the means of the input
vectors assigned to them. Here the generalization $p=1$ ($l^1$ norm)
will also be considered.
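For instance, for $p=2$ the objective can be evaluated directly from
the data, the centers, and the assignments (a minimal sketch with a
hypothetical helper, not part of the library API):

@code
// hypothetical helper: the K-means energy E for the l2 case
double kmeans_energy_l2 (double const * data, double const * centers,
                         vl_uint32 const * assignments,
                         vl_size dimension, vl_size numData)
{
  double energy = 0 ;
  vl_uindex i, d ;
  for (i = 0 ; i < numData ; ++i) {
    double const * x = data + i * dimension ;
    double const * c = centers + assignments[i] * dimension ;
    for (d = 0 ; d < dimension ; ++d) {
      double delta = x[d] - c[d] ;
      energy += delta * delta ; // ||x_i - c_{q_i}||_2^2
    }
  }
  return energy ;
}
@endcode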
Up to normalization, the K-means objective $E$ is also the average
reconstruction error if the original points are approximated with the
cluster centers. Thus K-means is used not only to group the input
points into clusters, but also to `quantize` their values.
K-means is widely used in computer vision, for example in the
construction of vocabularies of visual features (visual words). In
these applications the number $n$ of points to cluster and/or the
number $K$ of clusters is often large. Unfortunately, minimizing the
objective $E$ is in general a difficult combinatorial problem, so
locally optimal or approximated solutions are sought instead.
The basic K-means algorithm alternates between re-estimating the
centers and the assignments (@ref kmeans-lloyd). Combined with a good
initialization strategy (@ref kmeans-init) and, potentially, by
re-running the optimization from a number of randomized starting
states, this algorithm may attain satisfactory solutions in practice.
However, despite its simplicity, Lloyd's algorithm is often too slow.
A good replacement is Elkan's algorithm (@ref kmeans-elkan), which
uses the triangular inequality to cut down significantly the cost of
Lloyd's algorithm. Since this algorithm is otherwise equivalent, it
should often be preferred.
For very large problems (millions of points to cluster and hundreds,
thousands, or more clusters to find), even Elkan's algorithm is not
sufficiently fast. In these cases, one can resort to a variant of
Lloyd's algorithm that uses an approximated nearest neighbors routine
(@ref kmeans-ann).
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-init Initialization methods
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
All the $K$-means algorithms considered here find locally optimal
solutions; as such the way they are initialized is important. @ref
kmeans.h supports the following initialization algorithms:
@par Random data samples
The simplest initialization method is to sample $K$ points at random
from the input data and use them as initial values for the cluster
centers.
@par K-means++
@cite{arthur07k-means} proposes a randomized initialization of the
centers which improves upon random selection. The first center $\bc_1$
is selected at random from the data points $\bx_1, \dots, \bx_n $ and
the distance from this center to all points $\|\bx_i - \bc_1\|_p^p$ is
computed. Then the second center $\bc_2$ is selected at random from
the data points with probability proportional to the distance. The
procedure is repeated to obtain the other centers by using the minimum
distance to the centers collected so far.
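In code, K-means++ seeding simply replaces the random sampling call of
the first example:

@code
vl_kmeans_init_centers_plus_plus (kmeans, data, dimension, numData, numCenters) ;
@endcode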
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-lloyd Lloyd's algorithm
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The most common K-means method is Lloyd's algorithm
@cite{lloyd82least}. This algorithm is based on the observation that,
while jointly optimizing clusters and assignment is difficult,
optimizing one given the other is easy. Lloyd's algorithm alternates
the steps:
1. **Quantization.** Each point $\bx_i$ is reassigned to the center
$\bc_{q_i}$ closest to it. This requires finding, for each point, the
closest among the $K$ centers, which is potentially slow.
2. **Center estimation.** Each center $\bc_q$ is updated to minimize
its average distances to the points assigned to it. It is easy to
show that the best center is the mean or median of the points,
respectively if the $l^2$ or $l^1$ norm is considered.
A naive implementation of the assignment step requires $O(dnK)$
operations, where $d$ is the dimensionality of the data, $n$ the
number of data points, and $K$ the number of centers. Updating the
centers is much cheaper: $O(dn)$ operations suffice to compute the $K$
means and a slightly higher cost is required for the medians. Clearly,
the bottleneck is the assignment computation, and this is what the
other K-means algorithms try to improve.
During the iterations, it can happen that a cluster becomes empty. In
this case, K-means automatically **“restarts” the
cluster** center by selecting a training point at random.
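The following is a rough sketch of one Lloyd iteration for the $l^2$
case, with hypothetical plain-C variables; @c closest_center stands in
for a linear scan over the centers:

@code
// quantization: assign each point to its closest center
for (i = 0 ; i < numData ; ++i) {
  assignments[i] = closest_center (data + i * dimension,
                                   centers, numCenters, dimension) ;
}
// center estimation: recompute each center as the mean of its points
memset (centers, 0, sizeof(double) * dimension * numCenters) ;
memset (masses, 0, sizeof(vl_size) * numCenters) ;
for (i = 0 ; i < numData ; ++i) {
  masses [assignments[i]] ++ ;
  for (d = 0 ; d < dimension ; ++d) {
    centers [assignments[i] * dimension + d] += data [i * dimension + d] ;
  }
}
for (c = 0 ; c < numCenters ; ++c) {
  if (masses[c] > 0) {
    for (d = 0 ; d < dimension ; ++d) {
      centers [c * dimension + d] /= masses[c] ;
    }
  } // an empty cluster would be restarted here
}
@endcode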
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-elkan Elkan's algorithm
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
Elkan's algorithm @cite{elkan03using} is a variation of Lloyd's
alternating optimization algorithm (@ref kmeans-lloyd) that uses the
triangular inequality to avoid many distance calculations when
assigning points to clusters. While much faster than Lloyd's, Elkan's
method uses storage proportional to the number of clusters times the
number of data points, which makes it impractical for a very large
number of clusters.
The idea of this algorithm is that, if a center update does not move
the centers much, then most of the point-to-center computations can be
avoided when the point-to-center assignments are recomputed. To detect
which distances need evaluation, the triangular inequality is used to
lower and upper bound distances after a center update.
Elkan's algorithm uses two key observations. First, one has
\[
\|\bx_i - \bc_{q_i}\|_p \leq \|\bc - \bc_{q_i}\|_p / 2
\quad\Rightarrow\quad
\|\bx_i - \bc_{q_i}\|_p \leq \|\bx_i - \bc\|_p.
\]
Thus if the distance between $\bx_i$ and its current center
$\bc_{q_i}$ is less than half the distance of the center $\bc_{q_i}$
to another center $\bc$, then $\bc$ can be skipped when the new
assignment for $\bx_i$ is searched. Checking this requires keeping
track of all the inter-center distances, but centers are typically a
small fraction of the training data, so overall this can be a
significant saving. In particular, if this condition is satisfied for
all the centers $\bc \not= \bc_{q_i}$, the point $\bx_i$ can be
skipped completely. Furthermore, the condition can also be tested
based on an upper bound $UB_i$ of $\|\bx_i - \bc_{q_i}\|_p$.
Second, if a center $\bc$ is updated to $\hat{\bc}$, then the new
distance from $\bx$ to $\hat{\bc}$ is bounded from below and above by
\[
\|\bx - \bc\|_p - \|\bc - \hat\bc\|_p
\leq
\|\bx - \hat{\bc}\|_p
\leq
\|\bx - \bc\|_p + \|\bc - \hat\bc\|_p.
\]
This makes it possible to maintain an upper bound on the distance of $\bx_i$ to
its current center $\bc_{q_i}$ and a lower bound to any other center
$\bc$:
@f{align*}
UB_i & \leftarrow UB_i + \|\bc_{q_i} - \hat{\bc}_{q_i} \|_p \\
LB_i(\bc) & \leftarrow LB_i(\bc) - \|\bc -\hat \bc\|_p.
@f}
Thus the K-means algorithm becomes:
1. **Initialization.** Compute $LB_i(\bc) = \|\bx_i - \bc\|_p$ for
all points and centers. Find the current assignments $q_i$ and
bounds $UB_i$ by finding the closest centers to each point: $UB_i =
\min_{\bc} LB_i(\bc)$.
2. **Center estimation.**
1. Recompute all the centers based on the new means; call the updated
version $\hat{\bc}$.
2. Update all the bounds based on the distance $\|\bc - \hat\bc\|_p$
as explained above.
3. Set $\bc \leftarrow \hat\bc$ for all the centers and go to the next
iteration.
3. **Quantization.**
1. Skip any point $\bx_i$ such that $UB_i \leq \frac{1}{2} \|\bc_{q_i} - \bc\|_p$
for all centers $\bc \not= \bc_{q_i}$.
2. For each remaining point $\bx_i$ and center $\bc \not= \bc_{q_i}$:
1. Skip $\bc$ if
\[
UB_i \leq \frac{1}{2} \| \bc_{q_i} - \bc \|_p
\quad\text{or}\quad
UB_i \leq LB_i(\bc).
\]
The first condition reflects the first observation above; the
second uses the bounds to decide if $\bc$ can be closer than the
current center $\bc_{q_i}$ to the point $\bx_i$. If the center
cannot be skipped, continue as follows.
3. Skip $\bc$ if the condition above is satisfied after making the
upper bound tight:
\[
UB_i = LB_i(\bc_{q_i}) = \| \bx_i - \bc_{q_i} \|_p.
\]
Note that the latter calculation can be done only once for $\bx_i$.
If the center still cannot be skipped, continue as follows.
4. Tighten the lower bound too:
\[
LB_i(\bc) = \| \bx_i - \bc \|_p.
\]
At this point both $UB_i$ and $LB_i(\bc)$ are tight. If $LB_i(\bc) <
UB_i$, then the point $\bx_i$ should be reassigned to
$\bc$. Update $q_i$ to the index of center $\bc$ and reset $UB_i
= LB_i(\bc)$.
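In code, the bound updates of step 2.2 amount to a single pass over
points and centers. A minimal sketch for the $l^1$ case, with
hypothetical arrays @c UB, @c LB and @c shift, where <code>shift[c]</code>
stores $\|\bc - \hat\bc\|_1$:

@code
for (x = 0 ; x < numData ; ++x) {
  UB[x] += shift [assignments[x]] ;        // loosen the upper bound
  for (c = 0 ; c < numCenters ; ++c) {
    LB[c + x * numCenters] -= shift[c] ;   // loosen the lower bound
    if (LB[c + x * numCenters] < 0) LB[c + x * numCenters] = 0 ;
  }
}
@endcode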
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-ann ANN algorithm
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The *Approximate Nearest Neighbor* (ANN) K-means algorithm
@cite{beis97shape} @cite{silpa-anan08optimised} @cite{muja09fast} is a
variant of Lloyd's algorithm (@ref kmeans-lloyd) that uses a best-bin-first
randomized KD-tree algorithm to approximately (and quickly) find the
closest cluster center to each point. The KD-tree implementation is
based on @ref kdtree.
The algorithm can be summarized as follows:
1. **Quantization.** Each point $\bx_i$ is reassigned to the center
$\bc_{q_i}$ closest to it. This starts by indexing the $K$ centers
by a KD-tree and then using the latter to quickly find the closest
center for every training point. The search is approximated to
further improve speed. This opens up the possibility that a data
point may receive an assignment that is *worse* than the current
one. This is avoided by checking that the new assignment estimated
by using ANN is an improvement; otherwise the old assignment is
kept.
2. **Center estimation.** Each center $\bc_q$ is updated to minimize
its average distances to the points assigned to it. It is easy to
show that the best center is the mean or median of the points,
respectively if the $l^2$ or $l^1$ norm is considered.
The key is to carefully trade off the speedup obtained by using the
ANN algorithm against the loss in accuracy when retrieving neighbors. Due
to the curse of dimensionality, KD-trees become less effective for
higher dimensional data, so that the search cost, which in the best
case is logarithmic with this data structure, may become effectively
linear. This is somewhat mitigated by the fact that a new KD-tree
is computed at each iteration, reducing the likelihood that points may
get stuck with sub-optimal assignments.
Experiments with the quantization of 128-dimensional SIFT features
show that the ANN algorithm may use one quarter of the comparisons of
Elkan's while retaining a similar solution accuracy.
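In code, switching the first example to the ANN algorithm takes a few
calls. The two tuning setters below are assumptions mirroring the
@c numTrees and @c maxNumComparisons parameters of the implementation:

@code
vl_kmeans_set_algorithm (kmeans, VlKMeansANN) ;
vl_kmeans_set_num_trees (kmeans, 3) ;             // randomized KD-trees in the forest
vl_kmeans_set_max_num_comparisons (kmeans, 100) ; // ANN search budget per query
vl_kmeans_refine_centers (kmeans, data, numData) ;
@endcode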
*/
#include "kmeans.h"
#include "generic.h"
#include "mathop.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* ================================================================ */
#ifndef VL_KMEANS_INSTANTIATING
/** ------------------------------------------------------------------
** @brief Reset state
**
** The function resets the state of the KMeans object. It deletes
** any stored centers, releasing the corresponding memory. This
** cancels the effect of seeding or setting the centers, but
** does not change the other configuration parameters.
**/
VL_EXPORT void
vl_kmeans_reset (VlKMeans * self)
{
self->numCenters = 0 ;
self->dimension = 0 ;
if (self->centers) vl_free(self->centers) ;
if (self->centerDistances) vl_free(self->centerDistances) ;
self->centers = NULL ;
self->centerDistances = NULL ;
}
/** ------------------------------------------------------------------
** @brief Create a new KMeans object
** @param dataType type of data (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE)
** @param distance distance measure.
** @return new KMeans object instance.
**/
VL_EXPORT VlKMeans *
vl_kmeans_new (vl_type dataType,
VlVectorComparisonType distance)
{
VlKMeans * self = vl_calloc(1, sizeof(VlKMeans)) ;
self->algorithm = VlKMeansLloyd ;
self->distance = distance ;
self->dataType = dataType ;
self->verbosity = 0 ;
self->maxNumIterations = 100 ;
self->minEnergyVariation = 1e-4 ;
self->numRepetitions = 1 ;
self->centers = NULL ;
self->centerDistances = NULL ;
self->numTrees = 3;
self->maxNumComparisons = 100;
vl_kmeans_reset (self) ;
return self ;
}
/** ------------------------------------------------------------------
** @brief Create a new KMeans object by copy
** @param kmeans KMeans object to copy.
** @return new copy.
**/
VL_EXPORT VlKMeans *
vl_kmeans_new_copy (VlKMeans const * kmeans)
{
VlKMeans * self = vl_malloc(sizeof(VlKMeans)) ;
self->algorithm = kmeans->algorithm ;
self->distance = kmeans->distance ;
self->dataType = kmeans->dataType ;
self->verbosity = kmeans->verbosity ;
self->maxNumIterations = kmeans->maxNumIterations ;
self->numRepetitions = kmeans->numRepetitions ;
self->dimension = kmeans->dimension ;
self->numCenters = kmeans->numCenters ;
self->centers = NULL ;
self->centerDistances = NULL ;
self->numTrees = kmeans->numTrees;
self->maxNumComparisons = kmeans->maxNumComparisons;
if (kmeans->centers) {
vl_size dataSize = vl_get_type_size(self->dataType) * self->dimension * self->numCenters ;
self->centers = vl_malloc(dataSize) ;
memcpy (self->centers, kmeans->centers, dataSize) ;
}
if (kmeans->centerDistances) {
vl_size dataSize = vl_get_type_size(self->dataType) * self->numCenters * self->numCenters ;
self->centerDistances = vl_malloc(dataSize) ;
memcpy (self->centerDistances, kmeans->centerDistances, dataSize) ;
}
return self ;
}
/** ------------------------------------------------------------------
** @brief Deletes a KMeans object
** @param self KMeans object instance.
**
** The function deletes the KMeans object instance created
** by ::vl_kmeans_new.
**/
VL_EXPORT void
vl_kmeans_delete (VlKMeans * self)
{
vl_kmeans_reset (self) ;
vl_free (self) ;
}
/* a helper structure */
typedef struct _VlKMeansSortWrapper {
vl_uint32 * permutation ;
void const * data ;
vl_size stride ;
} VlKMeansSortWrapper ;
/* ---------------------------------------------------------------- */
/* Instantiate shuffle algorithm */
#define VL_SHUFFLE_type vl_uindex
#define VL_SHUFFLE_prefix _vl_kmeans
#include "shuffle-def.h"
/* #ifndef VL_KMEANS_INSTANTIATING */
#endif
/* ================================================================ */
#ifdef VL_KMEANS_INSTANTIATING
/* ---------------------------------------------------------------- */
/* Set centers */
/* ---------------------------------------------------------------- */
static void
VL_XCAT(_vl_kmeans_set_centers_, SFX)
(VlKMeans * self,
TYPE const * centers,
vl_size dimension,
vl_size numCenters)
{
self->dimension = dimension ;
self->numCenters = numCenters ;
self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;
memcpy ((TYPE*)self->centers, centers,
sizeof(TYPE) * dimension * numCenters) ;
}
/* ---------------------------------------------------------------- */
/* Random seeding */
/* ---------------------------------------------------------------- */
static void
VL_XCAT(_vl_kmeans_init_centers_with_rand_data_, SFX)
(VlKMeans * self,
TYPE const * data,
vl_size dimension,
vl_size numData,
vl_size numCenters)
{
vl_uindex i, j, k ;
VlRand * rand = vl_get_rand () ;
self->dimension = dimension ;
self->numCenters = numCenters ;
self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;
{
vl_uindex * perm = vl_malloc (sizeof(vl_uindex) * numData) ;
#if (FLT == VL_TYPE_FLOAT)
VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
TYPE * distances = vl_malloc (sizeof(TYPE) * numCenters) ;
/* get a random permutation of the data points */
for (i = 0 ; i < numData ; ++i) perm[i] = i ;
_vl_kmeans_shuffle (perm, numData, rand) ;
for (k = 0, i = 0 ; k < numCenters ; ++ i) {
/* compare the next data point to all centers collected so far
to detect duplicates (if there are enough left)
*/
if (numCenters - k < numData - i) {
vl_bool duplicateDetected = VL_FALSE ;
VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(distances,
dimension,
data + dimension * perm[i], 1,
(TYPE*)self->centers, k,
distFn) ;
for (j = 0 ; j < k ; ++j) {
duplicateDetected |= (distances[j] == 0) ;
}
if (duplicateDetected) continue ;
}
/* ok, it is not a duplicate so we can accept it! */
memcpy ((TYPE*)self->centers + dimension * k,
data + dimension * perm[i],
sizeof(TYPE) * dimension) ;
k ++ ;
}
vl_free(distances) ;
vl_free(perm) ;
}
}
/* ---------------------------------------------------------------- */
/* kmeans++ seeding */
/* ---------------------------------------------------------------- */
static void
VL_XCAT(_vl_kmeans_init_centers_plus_plus_, SFX)
(VlKMeans * self,
TYPE const * data,
vl_size dimension,
vl_size numData,
vl_size numCenters)
{
vl_uindex x, c ;
VlRand * rand = vl_get_rand () ;
TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
TYPE * minDistances = vl_malloc (sizeof(TYPE) * numData) ;
#if (FLT == VL_TYPE_FLOAT)
VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
self->dimension = dimension ;
self->numCenters = numCenters ;
self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;
for (x = 0 ; x < numData ; ++x) {
minDistances[x] = (TYPE) VL_INFINITY_D ;
}
/* select the first point at random */
x = vl_rand_uindex (rand, numData) ;
c = 0 ;
while (1) {
TYPE energy = 0 ;
TYPE acc = 0 ;
TYPE thresh = (TYPE) vl_rand_real1 (rand) ;
memcpy ((TYPE*)self->centers + c * dimension,
data + x * dimension,
sizeof(TYPE) * dimension) ;
c ++ ;
if (c == numCenters) break ;
VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)
(distances,
dimension,
(TYPE*)self->centers + (c - 1) * dimension, 1,
data, numData,
distFn) ;
for (x = 0 ; x < numData ; ++x) {
minDistances[x] = VL_MIN(minDistances[x], distances[x]) ;
energy += minDistances[x] ;
}
for (x = 0 ; x < numData - 1 ; ++x) {
acc += minDistances[x] ;
if (acc >= thresh * energy) break ;
}
}
vl_free(distances) ;
vl_free(minDistances) ;
}
/* ---------------------------------------------------------------- */
/* Quantization */
/* ---------------------------------------------------------------- */
static void
VL_XCAT(_vl_kmeans_quantize_, SFX)
(VlKMeans * self,
vl_uint32 * assignments,
TYPE * distances,
TYPE const * data,
vl_size numData)
{
vl_index i ;
#if (FLT == VL_TYPE_FLOAT)
VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
#ifdef _OPENMP
#pragma omp parallel \
shared(self, distances, assignments, numData, distFn, data) \
num_threads(vl_get_max_threads())
#endif
{
/* vl_malloc cannot be used here if mapped to MATLAB malloc */
TYPE * distanceToCenters = malloc(sizeof(TYPE) * self->numCenters) ;
#ifdef _OPENMP
#pragma omp for
#endif
for (i = 0 ; i < (signed)numData ; ++i) {
vl_uindex k ;
TYPE bestDistance = (TYPE) VL_INFINITY_D ;
VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(distanceToCenters,
self->dimension,
data + self->dimension * i, 1,
(TYPE*)self->centers, self->numCenters,
distFn) ;
for (k = 0 ; k < self->numCenters ; ++k) {
if (distanceToCenters[k] < bestDistance) {
bestDistance = distanceToCenters[k] ;
assignments[i] = (vl_uint32)k ;
}
}
if (distances) distances[i] = bestDistance ;
}
free(distanceToCenters) ;
}
}
/* ---------------------------------------------------------------- */
/* ANN quantization */
/* ---------------------------------------------------------------- */
static void
VL_XCAT(_vl_kmeans_quantize_ann_, SFX)
(VlKMeans * self,
vl_uint32 * assignments,
TYPE * distances,
TYPE const * data,
vl_size numData,
vl_bool update)
{
#if (FLT == VL_TYPE_FLOAT)
VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
VlKDForest * forest = vl_kdforest_new(self->dataType,self->dimension,self->numTrees, self->distance) ;
vl_kdforest_set_max_num_comparisons(forest,self->maxNumComparisons);
vl_kdforest_set_thresholding_method(forest,VL_KDTREE_MEDIAN);
vl_kdforest_build(forest,self->numCenters,self->centers);
#ifdef _OPENMP
#pragma omp parallel \
num_threads(vl_get_max_threads()) \
shared(self, forest, update, assignments, distances, data, numData, distFn)
#endif
{
VlKDForestNeighbor neighbor ;
VlKDForestSearcher * searcher ;
vl_index x;
#ifdef _OPENMP
#pragma omp critical
#endif
searcher = vl_kdforest_new_searcher (forest) ;
#ifdef _OPENMP
#pragma omp for
#endif
for(x = 0 ; x < (signed)numData ; ++x) {
vl_kdforestsearcher_query (searcher, &neighbor, 1, (TYPE const *) (data + x*self->dimension));
if (distances) {
if(!update) {
distances[x] = (TYPE) neighbor.distance;
assignments[x] = (vl_uint32) neighbor.index ;
} else {
TYPE prevDist = (TYPE) distFn(self->dimension,
data + self->dimension * x,
(TYPE*)self->centers + self->dimension *assignments[x]);
if (prevDist > (TYPE) neighbor.distance) {
distances[x] = (TYPE) neighbor.distance ;
assignments[x] = (vl_uint32) neighbor.index ;
} else {
distances[x] = prevDist ;
}
}
} else {
assignments[x] = (vl_uint32) neighbor.index ;
}
} /* end for */
} /* end of parallel region */
vl_kdforest_delete(forest);
}
/* ---------------------------------------------------------------- */
/* Helper functions */
/* ---------------------------------------------------------------- */
/* The sorting routine is used to find an increasing permutation of each
* data dimension. This is used to quickly find the median for l1
* distance clustering. */
VL_INLINE TYPE
VL_XCAT3(_vl_kmeans_, SFX, _qsort_cmp)
(VlKMeansSortWrapper * array, vl_uindex indexA, vl_uindex indexB)
{
return
((TYPE*)array->data) [array->permutation[indexA] * array->stride]
-
((TYPE*)array->data) [array->permutation[indexB] * array->stride] ;
}
VL_INLINE void
VL_XCAT3(_vl_kmeans_, SFX, _qsort_swap)
(VlKMeansSortWrapper * array, vl_uindex indexA, vl_uindex indexB)
{
vl_uint32 tmp = array->permutation[indexA] ;
array->permutation[indexA] = array->permutation[indexB] ;
array->permutation[indexB] = tmp ;
}
#define VL_QSORT_prefix VL_XCAT3(_vl_kmeans_, SFX, _qsort)
#define VL_QSORT_array VlKMeansSortWrapper*
#define VL_QSORT_cmp VL_XCAT3(_vl_kmeans_, SFX, _qsort_cmp)
#define VL_QSORT_swap VL_XCAT3(_vl_kmeans_, SFX, _qsort_swap)
#include "qsort-def.h"
static void
VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)
(VlKMeans * self, vl_uint32 * permutations, TYPE const * data, vl_size numData)
{
vl_uindex d, x ;
for (d = 0 ; d < self->dimension ; ++d) {
VlKMeansSortWrapper array ;
array.permutation = permutations + d * numData ;
array.data = data + d ;
array.stride = self->dimension ;
for (x = 0 ; x < numData ; ++x) {
array.permutation[x] = (vl_uint32)x ;
}
VL_XCAT3(_vl_kmeans_, SFX, _qsort_sort)(&array, numData) ;
}
}
/* ---------------------------------------------------------------- */
/* Lloyd refinement */
/* ---------------------------------------------------------------- */
static double
VL_XCAT(_vl_kmeans_refine_centers_lloyd_, SFX)
(VlKMeans * self,
TYPE const * data,
vl_size numData)
{
vl_size c, d, x, iteration ;
double previousEnergy = VL_INFINITY_D ;
double initialEnergy = VL_INFINITY_D ;
double energy ;
TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ;
vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ;
vl_uint32 * permutations = NULL ;
vl_size * numSeenSoFar = NULL ;
VlRand * rand = vl_get_rand () ;
vl_size totNumRestartedCenters = 0 ;
vl_size numRestartedCenters = 0 ;
if (self->distance == VlDistanceL1) {
permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ;
numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ;
VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ;
}
for (energy = VL_INFINITY_D,
iteration = 0;
1 ;
++ iteration) {
/* assign data to clusters */
VL_XCAT(_vl_kmeans_quantize_, SFX)(self, assignments, distances, data, numData) ;
/* compute energy */
energy = 0 ;
for (x = 0 ; x < numData ; ++x) energy += distances[x] ;
if (self->verbosity) {
VL_PRINTF("kmeans: Lloyd iter %d: energy = %g\n", iteration,
energy) ;
}
/* check termination conditions */
if (iteration >= self->maxNumIterations) {
if (self->verbosity) {
VL_PRINTF("kmeans: Lloyd terminating because maximum number of iterations reached\n") ;
}
break ;
}
if (energy == previousEnergy) {
if (self->verbosity) {
VL_PRINTF("kmeans: Lloyd terminating because the algorithm fully converged\n") ;
}
break ;
}
if (iteration == 0) {
initialEnergy = energy ;
} else {
double eps = (previousEnergy - energy) / (initialEnergy - energy) ;
if (eps < self->minEnergyVariation) {
if (self->verbosity) {
VL_PRINTF("kmeans: ANN terminating because the energy relative variation was less than %f\n", self->minEnergyVariation) ;
}
break ;
}
}
/* begin next iteration */
previousEnergy = energy ;
/* update clusters */
memset(clusterMasses, 0, sizeof(vl_size) * numData) ;
for (x = 0 ; x < numData ; ++x) {
clusterMasses[assignments[x]] ++ ;
}
numRestartedCenters = 0 ;
switch (self->distance) {
case VlDistanceL2:
memset(self->centers, 0, sizeof(TYPE) * self->dimension * self->numCenters) ;
for (x = 0 ; x < numData ; ++x) {
TYPE * cpt = (TYPE*)self->centers + assignments[x] * self->dimension ;
TYPE const * xpt = data + x * self->dimension ;
for (d = 0 ; d < self->dimension ; ++d) {
cpt[d] += xpt[d] ;
}
}
for (c = 0 ; c < self->numCenters ; ++c) {
TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
if (clusterMasses[c] > 0) {
TYPE mass = clusterMasses[c] ;
for (d = 0 ; d < self->dimension ; ++d) {
cpt[d] /= mass ;
}
} else {
vl_uindex x = vl_rand_uindex(rand, numData) ;
numRestartedCenters ++ ;
for (d = 0 ; d < self->dimension ; ++d) {
cpt[d] = data[x * self->dimension + d] ;
}
}
}
break ;
case VlDistanceL1:
for (d = 0 ; d < self->dimension ; ++d) {
vl_uint32 * perm = permutations + d * numData ;
memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ;
for (x = 0; x < numData ; ++x) {
c = assignments[perm[x]] ;
if (2 * numSeenSoFar[c] < clusterMasses[c]) {
((TYPE*)self->centers) [d + c * self->dimension] =
data [d + perm[x] * self->dimension] ;
}
numSeenSoFar[c] ++ ;
}
/* restart the centers as required */
for (c = 0 ; c < self->numCenters ; ++c) {
if (clusterMasses[c] == 0) {
TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
vl_uindex x = vl_rand_uindex(rand, numData) ;
numRestartedCenters ++ ;
for (d = 0 ; d < self->dimension ; ++d) {
cpt[d] = data[x * self->dimension + d] ;
}
}
}
}
break ;
default:
abort();
} /* done compute centers */
totNumRestartedCenters += numRestartedCenters ;
if (self->verbosity && numRestartedCenters) {
VL_PRINTF("kmeans: Lloyd iter %d: restarted %d centers\n", iteration,
numRestartedCenters) ;
}
} /* next Lloyd iteration */
if (permutations) {
vl_free(permutations) ;
}
if (numSeenSoFar) {
vl_free(numSeenSoFar) ;
}
vl_free(distances) ;
vl_free(assignments) ;
vl_free(clusterMasses) ;
return energy ;
}
static double
VL_XCAT(_vl_kmeans_update_center_distances_, SFX)
(VlKMeans * self)
{
#if (FLT == VL_TYPE_FLOAT)
VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
if (! self->centerDistances) {
self->centerDistances = vl_malloc (sizeof(TYPE) *
self->numCenters *
self->numCenters) ;
}
VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(self->centerDistances,
self->dimension,
self->centers, self->numCenters,
NULL, 0,
distFn) ;
return self->numCenters * (self->numCenters - 1) / 2 ;
}
static double
VL_XCAT(_vl_kmeans_refine_centers_ann_, SFX)
(VlKMeans * self,
TYPE const * data,
vl_size numData)
{
vl_size c, d, x, iteration ;
double initialEnergy = VL_INFINITY_D ;
double previousEnergy = VL_INFINITY_D ;
double energy ;
vl_uint32 * permutations = NULL ;
vl_size * numSeenSoFar = NULL ;
VlRand * rand = vl_get_rand () ;
vl_size totNumRestartedCenters = 0 ;
vl_size numRestartedCenters = 0 ;
vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ;
vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ;
TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
if (self->distance == VlDistanceL1) {
permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ;
numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ;
VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ;
}
for (energy = VL_INFINITY_D,
iteration = 0;
1 ;
++ iteration) {
/* assign data to clusters */
VL_XCAT(_vl_kmeans_quantize_ann_, SFX)(self, assignments, distances, data, numData, iteration > 0) ;
/* compute energy */
energy = 0 ;
for (x = 0 ; x < numData ; ++x) energy += distances[x] ;
if (self->verbosity) {
VL_PRINTF("kmeans: ANN iter %d: energy = %g\n", iteration,
energy) ;
}
/* check termination conditions */
if (iteration >= self->maxNumIterations) {
if (self->verbosity) {
VL_PRINTF("kmeans: ANN terminating because the maximum number of iterations has been reached\n") ;
}
break ;
}
if (energy == previousEnergy) {
if (self->verbosity) {
VL_PRINTF("kmeans: ANN terminating because the algorithm fully converged\n") ;
}
break ;
}
if (iteration == 0) {
initialEnergy = energy ;
} else {
double eps = (previousEnergy - energy) / (initialEnergy - energy) ;
if (eps < self->minEnergyVariation) {
if (self->verbosity) {
VL_PRINTF("kmeans: ANN terminating because the energy relative variation was less than %f\n", self->minEnergyVariation) ;
}
break ;
}
}
/* begin next iteration */
previousEnergy = energy ;
/* update clusters */
memset(clusterMasses, 0, sizeof(vl_size) * numData) ;
for (x = 0 ; x < numData ; ++x) {
clusterMasses[assignments[x]] ++ ;
}
numRestartedCenters = 0 ;
switch (self->distance) {
case VlDistanceL2:
memset(self->centers, 0, sizeof(TYPE) * self->dimension * self->numCenters) ;
for (x = 0 ; x < numData ; ++x) {
TYPE * cpt = (TYPE*)self->centers + assignments[x] * self->dimension ;
TYPE const * xpt = data + x * self->dimension ;
for (d = 0 ; d < self->dimension ; ++d) {
cpt[d] += xpt[d] ;
}
}
for (c = 0 ; c < self->numCenters ; ++c) {
TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
if (clusterMasses[c] > 0) {
TYPE mass = clusterMasses[c] ;
for (d = 0 ; d < self->dimension ; ++d) {
cpt[d] /= mass ;
}
} else {
vl_uindex x = vl_rand_uindex(rand, numData) ;
numRestartedCenters ++ ;
for (d = 0 ; d < self->dimension ; ++d) {
cpt[d] = data[x * self->dimension + d] ;
}
}
}
break ;
case VlDistanceL1:
for (d = 0 ; d < self->dimension ; ++d) {
vl_uint32 * perm = permutations + d * numData ;
memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ;
for (x = 0; x < numData ; ++x) {
c = assignments[perm[x]] ;
if (2 * numSeenSoFar[c] < clusterMasses[c]) {
((TYPE*)self->centers) [d + c * self->dimension] =
data [d + perm[x] * self->dimension] ;
}
numSeenSoFar[c] ++ ;
}
/* restart the centers as required */
for (c = 0 ; c < self->numCenters ; ++c) {
if (clusterMasses[c] == 0) {
TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
vl_uindex x = vl_rand_uindex(rand, numData) ;
numRestartedCenters ++ ;
for (d = 0 ; d < self->dimension ; ++d) {
cpt[d] = data[x * self->dimension + d] ;
}
}
}
}
break ;
default:
VL_PRINT("bad distance set: %d\n",self->distance);
abort();
} /* done compute centers */
totNumRestartedCenters += numRestartedCenters ;
if (self->verbosity && numRestartedCenters) {
VL_PRINTF("kmeans: ANN iter %d: restarted %d centers\n", iteration,
numRestartedCenters) ;
}
}
if (permutations) {
vl_free(permutations) ;
}
if (numSeenSoFar) {
vl_free(numSeenSoFar) ;
}
vl_free(distances) ;
vl_free(assignments) ;
vl_free(clusterMasses) ;
return energy ;
}
/* ---------------------------------------------------------------- */
/* Elkan refinement */
/* ---------------------------------------------------------------- */
static double
VL_XCAT(_vl_kmeans_refine_centers_elkan_, SFX)
(VlKMeans * self,
TYPE const * data,
vl_size numData)
{
vl_size d, iteration ;
vl_index x ;
vl_uint32 c, j ;
vl_bool allDone ;
TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ;
vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ;
VlRand * rand = vl_get_rand () ;
#if (FLT == VL_TYPE_FLOAT)
VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
TYPE * nextCenterDistances = vl_malloc (sizeof(TYPE) * self->numCenters) ;
TYPE * pointToClosestCenterUB = vl_malloc (sizeof(TYPE) * numData) ;
vl_bool * pointToClosestCenterUBIsStrict = vl_malloc (sizeof(vl_bool) * numData) ;
TYPE * pointToCenterLB = vl_malloc (sizeof(TYPE) * numData * self->numCenters) ;
TYPE * newCenters = vl_malloc(sizeof(TYPE) * self->dimension * self->numCenters) ;
TYPE * centerToNewCenterDistances = vl_malloc (sizeof(TYPE) * self->numCenters) ;
vl_uint32 * permutations = NULL ;
vl_size * numSeenSoFar = NULL ;
double energy ;
vl_size totDistanceComputationsToInit = 0 ;
vl_size totDistanceComputationsToRefreshUB = 0 ;
vl_size totDistanceComputationsToRefreshLB = 0 ;
vl_size totDistanceComputationsToRefreshCenterDistances = 0 ;
vl_size totDistanceComputationsToNewCenters = 0 ;
vl_size totDistanceComputationsToFinalize = 0 ;
vl_size totNumRestartedCenters = 0 ;
if (self->distance == VlDistanceL1) {
permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ;
numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ;
VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ;
}
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* Initialization */
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* An iteration is: get_new_centers + reassign + get_energy.
This counts as iteration 0, where get_new_centers is assumed
to have been performed by the initialization function before
calling the train function */
/* update distances between centers */
totDistanceComputationsToInit +=
VL_XCAT(_vl_kmeans_update_center_distances_, SFX)(self) ;
/* assign points to the initial centers and initialize the bounds */
memset(pointToCenterLB, 0, sizeof(TYPE) * self->numCenters * numData) ;
for (x = 0 ; x < (signed)numData ; ++x) {
TYPE distance ;
/* do the first center */
assignments[x] = 0 ;
distance = distFn(self->dimension,
data + x * self->dimension,
(TYPE*)self->centers + 0) ;
pointToClosestCenterUB[x] = distance ;
pointToClosestCenterUBIsStrict[x] = VL_TRUE ;
pointToCenterLB[0 + x * self->numCenters] = distance ;
totDistanceComputationsToInit += 1 ;
/* do other centers */
for (c = 1 ; c < self->numCenters ; ++c) {
/* Can skip if the center assigned so far is twice as close
as its distance to the center under consideration */
if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
pointToClosestCenterUB[x] <=
((TYPE*)self->centerDistances)
[c + assignments[x] * self->numCenters]) {
continue ;
}
distance = distFn(self->dimension,
data + x * self->dimension,
(TYPE*)self->centers + c * self->dimension) ;
pointToCenterLB[c + x * self->numCenters] = distance ;
totDistanceComputationsToInit += 1 ;
if (distance < pointToClosestCenterUB[x]) {
pointToClosestCenterUB[x] = distance ;
assignments[x] = c ;
}
}
}
/* compute UB on energy */
energy = 0 ;
for (x = 0 ; x < (signed)numData ; ++x) {
energy += pointToClosestCenterUB[x] ;
}
if (self->verbosity) {
VL_PRINTF("kmeans: Elkan iter 0: energy = %g, dist. calc. = %d\n",
energy, totDistanceComputationsToInit) ;
}
/* #define SANITY*/
#ifdef SANITY
{
int xx ;
int cc ;
TYPE tol = 1e-5 ;
VL_PRINTF("inconsistencies after initial assignments:\n");
for (xx = 0 ; xx < numData ; ++xx) {
for (cc = 0 ; cc < self->numCenters ; ++cc) {
TYPE a = pointToCenterLB[cc + xx * self->numCenters] ;
TYPE b = distFn(self->dimension,
data + self->dimension * xx,
(TYPE*)self->centers + self->dimension * cc) ;
if (cc == assignments[xx]) {
TYPE z = pointToClosestCenterUB[xx] ;
if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n",
cc, xx, z, b) ;
}
if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f\n",
cc, xx, a, b) ;
}
}
}
#endif
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* Iterations */
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
for (iteration = 1 ; 1; ++iteration) {
vl_size numDistanceComputationsToRefreshUB = 0 ;
vl_size numDistanceComputationsToRefreshLB = 0 ;
vl_size numDistanceComputationsToRefreshCenterDistances = 0 ;
vl_size numDistanceComputationsToNewCenters = 0 ;
vl_size numRestartedCenters = 0 ;
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* Compute new centers */
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
memset(clusterMasses, 0, sizeof(vl_size) * numData) ;
for (x = 0 ; x < (signed)numData ; ++x) {
clusterMasses[assignments[x]] ++ ;
}
switch (self->distance) {
case VlDistanceL2:
memset(newCenters, 0, sizeof(TYPE) * self->dimension * self->numCenters) ;
for (x = 0 ; x < (signed)numData ; ++x) {
TYPE * cpt = newCenters + assignments[x] * self->dimension ;
TYPE const * xpt = data + x * self->dimension ;
for (d = 0 ; d < self->dimension ; ++d) {
cpt[d] += xpt[d] ;
}
}
for (c = 0 ; c < self->numCenters ; ++c) {
TYPE * cpt = newCenters + c * self->dimension ;
if (clusterMasses[c] > 0) {
TYPE mass = clusterMasses[c] ;
for (d = 0 ; d < self->dimension ; ++d) {
cpt[d] /= mass ;
}
} else {
/* restart the center */
vl_uindex x = vl_rand_uindex(rand, numData) ;
numRestartedCenters ++ ;
for (d = 0 ; d < self->dimension ; ++d) {
cpt[d] = data[x * self->dimension + d] ;
}
}
}
break ;
case VlDistanceL1:
for (d = 0 ; d < self->dimension ; ++d) {
vl_uint32 * perm = permutations + d * numData ;
memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ;
for (x = 0; x < (signed)numData ; ++x) {
c = assignments[perm[x]] ;
if (2 * numSeenSoFar[c] < clusterMasses[c]) {
newCenters [d + c * self->dimension] =
data [d + perm[x] * self->dimension] ;
}
numSeenSoFar[c] ++ ;
}
}
/* restart the centers as required */
for (c = 0 ; c < self->numCenters ; ++c) {
if (clusterMasses[c] == 0) {
TYPE * cpt = newCenters + c * self->dimension ;
vl_uindex x = vl_rand_uindex(rand, numData) ;
numRestartedCenters ++ ;
for (d = 0 ; d < self->dimension ; ++d) {
cpt[d] = data[x * self->dimension + d] ;
}
}
}
break ;
default:
abort();
} /* done compute centers */
/* compute the distance from the old centers to the new centers */
for (c = 0 ; c < self->numCenters ; ++c) {
TYPE distance = distFn(self->dimension,
newCenters + c * self->dimension,
(TYPE*)self->centers + c * self->dimension) ;
centerToNewCenterDistances[c] = distance ;
numDistanceComputationsToNewCenters += 1 ;
}
/* make the new centers current */
{
TYPE * tmp = self->centers ;
self->centers = newCenters ;
newCenters = tmp ;
}
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/* Reassign points to the centers */
/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
/*
Update distances between centers.
*/
numDistanceComputationsToRefreshCenterDistances
+= VL_XCAT(_vl_kmeans_update_center_distances_, SFX)(self) ;
for (c = 0 ; c < self->numCenters ; ++c) {
nextCenterDistances[c] = (TYPE) VL_INFINITY_D ;
for (j = 0 ; j < self->numCenters ; ++j) {
if (j == c) continue ;
nextCenterDistances[c] = VL_MIN(nextCenterDistances[c],
((TYPE*)self->centerDistances)
[j + c * self->numCenters]) ;
}
}
/*
Update upper bounds on point-to-closest-center distances
based on the center variation.
*/
for (x = 0 ; x < (signed)numData ; ++x) {
TYPE a = pointToClosestCenterUB[x] ;
TYPE b = centerToNewCenterDistances[assignments[x]] ;
if (self->distance == VlDistanceL1) {
pointToClosestCenterUB[x] = a + b ;
} else {
#if (FLT == VL_TYPE_FLOAT)
TYPE sqrtab = sqrtf (a * b) ;
#else
TYPE sqrtab = sqrt (a * b) ;
#endif
pointToClosestCenterUB[x] = a + b + 2.0 * sqrtab ;
}
pointToClosestCenterUBIsStrict[x] = VL_FALSE ;
}
/*
Update lower bounds on point-to-center distances
based on the center variation.
*/
#if defined(_OPENMP)
#pragma omp parallel for default(shared) private(x,c) num_threads(vl_get_max_threads())
#endif
for (x = 0 ; x < (signed)numData ; ++x) {
for (c = 0 ; c < self->numCenters ; ++c) {
TYPE a = pointToCenterLB[c + x * self->numCenters] ;
TYPE b = centerToNewCenterDistances[c] ;
if (a < b) {
pointToCenterLB[c + x * self->numCenters] = 0 ;
} else {
if (self->distance == VlDistanceL1) {
pointToCenterLB[c + x * self->numCenters] = a - b ;
} else {
#if (FLT == VL_TYPE_FLOAT)
TYPE sqrtab = sqrtf (a * b) ;
#else
TYPE sqrtab = sqrt (a * b) ;
#endif
pointToCenterLB[c + x * self->numCenters] = a + b - 2.0 * sqrtab ;
}
}
}
}
#ifdef SANITY
{
int xx ;
int cc ;
TYPE tol = 1e-5 ;
VL_PRINTF("inconsistencies before assignments:\n");
for (xx = 0 ; xx < numData ; ++xx) {
for (cc = 0 ; cc < self->numCenters ; ++cc) {
TYPE a = pointToCenterLB[cc + xx * self->numCenters] ;
TYPE b = distFn(self->dimension,
data + self->dimension * xx,
(TYPE*)self->centers + self->dimension * cc) ;
if (cc == assignments[xx]) {
TYPE z = pointToClosestCenterUB[xx] ;
if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n",
cc, xx, z, b) ;
}
if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f (assign = %d)\n",
cc, xx, a, b, assignments[xx]) ;
}
}
}
#endif
/*
Scan the data and do the reassignments. Use the bounds to
skip as many point-to-center distance calculations as possible.
*/
allDone = VL_TRUE ;
#if defined(_OPENMP)
#pragma omp parallel for \
\
shared(self,numData, \
pointToClosestCenterUB,pointToCenterLB, \
nextCenterDistances,pointToClosestCenterUBIsStrict, \
assignments,data,distFn,allDone) \
private(c,x) \
reduction(+:numDistanceComputationsToRefreshUB,numDistanceComputationsToRefreshLB) \
num_threads(vl_get_max_threads())
#endif
for (x = 0 ; x < (signed)numData ; ++ x) {
/*
A point x sticks with its current center assignments[x] if
the UB on d(x, c[assignments[x]]) is not larger than half
the distance of c[assignments[x]] to any other center c.
*/
if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
pointToClosestCenterUB[x] <= nextCenterDistances[assignments[x]]) {
continue ;
}
for (c = 0 ; c < self->numCenters ; ++c) {
vl_uint32 cx = assignments[x] ;
TYPE distance ;
/* The point is not reassigned to a given center c
if either:
0 - c is already the assigned center
1 - The UB of d(x, c[assignments[x]]) is smaller than half
the distance of c[assignments[x]] to c, OR
2 - The UB of d(x, c[assignments[x]]) is smaller than the
LB of the distance of x to c.
*/
if (cx == c) {
continue ;
}
if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances)
[c + cx * self->numCenters]) {
continue ;
}
if (pointToClosestCenterUB[x] <= pointToCenterLB
[c + x * self->numCenters]) {
continue ;
}
/* If the UB is loose, try recomputing it and test again */
if (! pointToClosestCenterUBIsStrict[x]) {
distance = distFn(self->dimension,
data + self->dimension * x,
(TYPE*)self->centers + self->dimension * cx) ;
pointToClosestCenterUB[x] = distance ;
pointToClosestCenterUBIsStrict[x] = VL_TRUE ;
pointToCenterLB[cx + x * self->numCenters] = distance ;
numDistanceComputationsToRefreshUB += 1 ;
if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances)
[c + cx * self->numCenters]) {
continue ;
}
if (pointToClosestCenterUB[x] <= pointToCenterLB
[c + x * self->numCenters]) {
continue ;
}
}
/*
Now the UB is strict (equal to d(x, assignments[x])), but
we still could not exclude that x should be reassigned to
c. We therefore compute the distance, update the LB,
and check if a reassignment must be made
*/
distance = distFn(self->dimension,
data + x * self->dimension,
(TYPE*)self->centers + c * self->dimension) ;
numDistanceComputationsToRefreshLB += 1 ;
pointToCenterLB[c + x * self->numCenters] = distance ;
if (distance < pointToClosestCenterUB[x]) {
assignments[x] = c ;
pointToClosestCenterUB[x] = distance ;
allDone = VL_FALSE ;
/* the UB strict flag is already set here */
}
} /* assign center */
} /* next data point */
totDistanceComputationsToRefreshUB
+= numDistanceComputationsToRefreshUB ;
totDistanceComputationsToRefreshLB
+= numDistanceComputationsToRefreshLB ;
totDistanceComputationsToRefreshCenterDistances
+= numDistanceComputationsToRefreshCenterDistances ;
totDistanceComputationsToNewCenters
+= numDistanceComputationsToNewCenters ;
totNumRestartedCenters
+= numRestartedCenters ;
#ifdef SANITY
{
int xx ;
int cc ;
TYPE tol = 1e-5 ;
VL_PRINTF("inconsistencies after assignments:\n");
for (xx = 0 ; xx < numData ; ++xx) {
for (cc = 0 ; cc < self->numCenters ; ++cc) {
TYPE a = pointToCenterLB[cc + xx * self->numCenters] ;
TYPE b = distFn(self->dimension,
data + self->dimension * xx,
(TYPE*)self->centers + self->dimension * cc) ;
if (cc == assignments[xx]) {
TYPE z = pointToClosestCenterUB[xx] ;
if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n",
cc, xx, z, b) ;
}
if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f (assign = %d)\n",
cc, xx, a, b, assignments[xx]) ;
}
}
}
#endif
/* compute UB on energy */
energy = 0 ;
for (x = 0 ; x < (signed)numData ; ++x) {
energy += pointToClosestCenterUB[x] ;
}
if (self->verbosity) {
vl_size numDistanceComputations =
numDistanceComputationsToRefreshUB +
numDistanceComputationsToRefreshLB +
numDistanceComputationsToRefreshCenterDistances +
numDistanceComputationsToNewCenters ;
VL_PRINTF("kmeans: Elkan iter %d: energy <= %g, dist. calc. = %d\n",
iteration,
energy,
numDistanceComputations) ;
if (numRestartedCenters) {
VL_PRINTF("kmeans: Elkan iter %d: restarted %d centers\n",
iteration,
numRestartedCenters) ;
}
if (self->verbosity > 1) {
VL_PRINTF("kmeans: Elkan iter %d: total dist. calc. per type: "
"UB: %.1f%% (%d), LB: %.1f%% (%d), "
"intra_center: %.1f%% (%d), "
"new_center: %.1f%% (%d)\n",
iteration,
100.0 * numDistanceComputationsToRefreshUB / numDistanceComputations,
numDistanceComputationsToRefreshUB,
100.0 *numDistanceComputationsToRefreshLB / numDistanceComputations,
numDistanceComputationsToRefreshLB,
100.0 * numDistanceComputationsToRefreshCenterDistances / numDistanceComputations,
numDistanceComputationsToRefreshCenterDistances,
100.0 * numDistanceComputationsToNewCenters / numDistanceComputations,
numDistanceComputationsToNewCenters) ;
}
}
/* check termination conditions */
if (iteration >= self->maxNumIterations) {
if (self->verbosity) {
VL_PRINTF("kmeans: Elkan terminating because maximum number of iterations reached\n") ;
}
break ;
}
if (allDone) {
if (self->verbosity) {
VL_PRINTF("kmeans: Elkan terminating because the algorithm fully converged\n") ;
}
break ;
}
} /* next Elkan iteration */
/* compute true energy */
energy = 0 ;
for (x = 0 ; x < (signed)numData ; ++ x) {
vl_uindex cx = assignments [x] ;
energy += distFn(self->dimension,
data + self->dimension * x,
(TYPE*)self->centers + self->dimension * cx) ;
totDistanceComputationsToFinalize += 1 ;
}
{
vl_size totDistanceComputations =
totDistanceComputationsToInit +
totDistanceComputationsToRefreshUB +
totDistanceComputationsToRefreshLB +
totDistanceComputationsToRefreshCenterDistances +
totDistanceComputationsToNewCenters +
totDistanceComputationsToFinalize ;
double saving = (double)totDistanceComputations
/ (iteration * self->numCenters * numData) ;
if (self->verbosity) {
VL_PRINTF("kmeans: Elkan: total dist. calc.: %d (%.2f %% of Lloyd)\n",
totDistanceComputations, saving * 100.0) ;
if (totNumRestartedCenters) {
VL_PRINTF("kmeans: Elkan: there have been %d restarts\n",
totNumRestartedCenters) ;
}
}
if (self->verbosity > 1) {
VL_PRINTF("kmeans: Elkan: total dist. calc. per type: "
"init: %.1f%% (%d), UB: %.1f%% (%d), LB: %.1f%% (%d), "
"intra_center: %.1f%% (%d), "
"new_center: %.1f%% (%d), "
"finalize: %.1f%% (%d)\n",
100.0 * totDistanceComputationsToInit / totDistanceComputations,
totDistanceComputationsToInit,
100.0 * totDistanceComputationsToRefreshUB / totDistanceComputations,
totDistanceComputationsToRefreshUB,
100.0 *totDistanceComputationsToRefreshLB / totDistanceComputations,
totDistanceComputationsToRefreshLB,
100.0 * totDistanceComputationsToRefreshCenterDistances / totDistanceComputations,
totDistanceComputationsToRefreshCenterDistances,
100.0 * totDistanceComputationsToNewCenters / totDistanceComputations,
totDistanceComputationsToNewCenters,
100.0 * totDistanceComputationsToFinalize / totDistanceComputations,
totDistanceComputationsToFinalize) ;
}
}
if (permutations) {
vl_free(permutations) ;
}
if (numSeenSoFar) {
vl_free(numSeenSoFar) ;
}
vl_free(distances) ;
vl_free(assignments) ;
vl_free(clusterMasses) ;
vl_free(nextCenterDistances) ;
vl_free(pointToClosestCenterUB) ;
vl_free(pointToClosestCenterUBIsStrict) ;
vl_free(pointToCenterLB) ;
vl_free(newCenters) ;
vl_free(centerToNewCenterDistances) ;
return energy ;
}
/* ---------------------------------------------------------------- */
static double
VL_XCAT(_vl_kmeans_refine_centers_, SFX)
(VlKMeans * self,
TYPE const * data,
vl_size numData)
{
switch (self->algorithm) {
case VlKMeansLloyd:
return
VL_XCAT(_vl_kmeans_refine_centers_lloyd_, SFX)(self, data, numData) ;
break ;
case VlKMeansElkan:
return
VL_XCAT(_vl_kmeans_refine_centers_elkan_, SFX)(self, data, numData) ;
break ;
case VlKMeansANN:
return
VL_XCAT(_vl_kmeans_refine_centers_ann_, SFX)(self, data, numData) ;
break ;
default:
abort() ;
}
}
/* VL_KMEANS_INSTANTIATING */
#else
#ifndef __DOXYGEN__
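/* Instantiate the templated functions above once for float and once
   for double by re-including this file with FLT, TYPE, and SFX set
   accordingly. */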
#define FLT VL_TYPE_FLOAT
#define TYPE float
#define SFX f
#define VL_KMEANS_INSTANTIATING
#include "kmeans.c"
#define FLT VL_TYPE_DOUBLE
#define TYPE double
#define SFX d
#define VL_KMEANS_INSTANTIATING
#include "kmeans.c"
#endif
/* VL_KMEANS_INSTANTIATING */
#endif
/* ================================================================ */
#ifndef VL_KMEANS_INSTANTIATING
/** ------------------------------------------------------------------
** @brief Set centers
** @param self KMeans object.
** @param centers centers to copy.
** @param dimension data dimension.
** @param numCenters number of centers.
**/
VL_EXPORT void
vl_kmeans_set_centers
(VlKMeans * self,
void const * centers,
vl_size dimension,
vl_size numCenters)
{
vl_kmeans_reset (self) ;
switch (self->dataType) {
case VL_TYPE_FLOAT :
_vl_kmeans_set_centers_f
(self, (float const *)centers, dimension, numCenters) ;
break ;
case VL_TYPE_DOUBLE :
_vl_kmeans_set_centers_d
(self, (double const *)centers, dimension, numCenters) ;
break ;
default:
abort() ;
}
}
/** ------------------------------------------------------------------
** @brief Init centers by randomly sampling data
** @param self KMeans object.
** @param data data to sample from.
** @param dimension data dimension.
** @param numData number of data points.
** @param numCenters number of centers.
**
** The function initializes the KMeans centers by randomly sampling
** the data @a data.
**/
VL_EXPORT void
vl_kmeans_init_centers_with_rand_data
(VlKMeans * self,
void const * data,
vl_size dimension,
vl_size numData,
vl_size numCenters)
{
vl_kmeans_reset (self) ;
switch (self->dataType) {
case VL_TYPE_FLOAT :
_vl_kmeans_init_centers_with_rand_data_f
(self, (float const *)data, dimension, numData, numCenters) ;
break ;
case VL_TYPE_DOUBLE :
_vl_kmeans_init_centers_with_rand_data_d
(self, (double const *)data, dimension, numData, numCenters) ;
break ;
default:
abort() ;
}
}
/** ------------------------------------------------------------------
** @brief Seed centers by the KMeans++ algorithm
** @param self KMeans object.
** @param data data to sample from.
** @param dimension data dimension.
** @param numData number of data points.
** @param numCenters number of centers.
**/
VL_EXPORT void
vl_kmeans_init_centers_plus_plus
(VlKMeans * self,
void const * data,
vl_size dimension,
vl_size numData,
vl_size numCenters)
{
vl_kmeans_reset (self) ;
switch (self->dataType) {
case VL_TYPE_FLOAT :
_vl_kmeans_init_centers_plus_plus_f
(self, (float const *)data, dimension, numData, numCenters) ;
break ;
case VL_TYPE_DOUBLE :
_vl_kmeans_init_centers_plus_plus_d
(self, (double const *)data, dimension, numData, numCenters) ;
break ;
default:
abort() ;
}
}
/** ------------------------------------------------------------------
** @brief Quantize data
** @param self KMeans object.
** @param assignments data to closest center assignments (output).
** @param distances data to closest center distance (output).
** @param data data to quantize.
** @param numData number of data points to quantize.
**/
VL_EXPORT void
vl_kmeans_quantize
(VlKMeans * self,
vl_uint32 * assignments,
void * distances,
void const * data,
vl_size numData)
{
switch (self->dataType) {
case VL_TYPE_FLOAT :
_vl_kmeans_quantize_f
(self, assignments, distances, (float const *)data, numData) ;
break ;
case VL_TYPE_DOUBLE :
_vl_kmeans_quantize_d
(self, assignments, distances, (double const *)data, numData) ;
break ;
default:
abort() ;
}
}
/** ------------------------------------------------------------------
** @brief Quantize data using approximate nearest neighbours (ANN).
** @param self KMeans object.
** @param assignments data to centers assignments (output).
** @param distances data to closest center distance (output).
** @param data data to quantize.
** @param numData number of data points.
** @param update choose whether to update current assignments.
**
** The function uses an ANN procedure to compute the approximate
** nearest neighbours of the input data point.
**
** Setting @a update to ::VL_TRUE will cause the algorithm
** to *update existing assignments*. This means that each
** element of @a assignments and @a distances is updated only if the
** ANN procedure can find a better assignment than the existing one.
**/
VL_EXPORT void
vl_kmeans_quantize_ann
(VlKMeans * self,
vl_uint32 * assignments,
void * distances,
void const * data,
vl_size numData,
vl_bool update)
{
switch (self->dataType) {
case VL_TYPE_FLOAT :
_vl_kmeans_quantize_ann_f
(self, assignments, distances, (float const *)data, numData, update) ;
break ;
case VL_TYPE_DOUBLE :
_vl_kmeans_quantize_ann_d
(self, assignments, distances, (double const *)data, numData, update) ;
break ;
default:
abort() ;
}
}
/** ------------------------------------------------------------------
** @brief Refine center locations.
** @param self KMeans object.
** @param data data to quantize.
** @param numData number of data points.
** @return K-means energy at the end of optimization.
**
** The function calls the underlying K-means quantization algorithm
** (@ref VlKMeansAlgorithm) to quantize the specified data @a data.
** The function assumes that the cluster centers have already
** been assigned by using one of the seeding functions, or by
** setting them.
**/
VL_EXPORT double
vl_kmeans_refine_centers
(VlKMeans * self,
void const * data,
vl_size numData)
{
assert (self->centers) ;
switch (self->dataType) {
case VL_TYPE_FLOAT :
return
_vl_kmeans_refine_centers_f
(self, (float const *)data, numData) ;
case VL_TYPE_DOUBLE :
return
_vl_kmeans_refine_centers_d
(self, (double const *)data, numData) ;
default:
abort() ;
}
}
/** ------------------------------------------------------------------
** @brief Cluster data.
** @param self KMeans object.
** @param data data to quantize.
** @param dimension data dimension.
** @param numData number of data points.
** @param numCenters number of clusters.
** @return K-means energy at the end of optimization.
**
** The function initializes the centers by using the initialization
** algorithm set by ::vl_kmeans_set_initialization and refines them
** by the quantization algorithm set by ::vl_kmeans_set_algorithm.
** The process is repeated one or more times (see
 ** ::vl_kmeans_set_num_repetitions) and the result with the smallest
** energy is retained.
**/
VL_EXPORT double
vl_kmeans_cluster (VlKMeans * self,
void const * data,
vl_size dimension,
vl_size numData,
vl_size numCenters)
{
vl_uindex repetition ;
double bestEnergy = VL_INFINITY_D ;
void * bestCenters = NULL ;
for (repetition = 0 ; repetition < self->numRepetitions ; ++ repetition) {
double energy ;
double timeRef ;
if (self->verbosity) {
VL_PRINTF("kmeans: repetition %d of %d\n", repetition + 1, self->numRepetitions) ;
}
timeRef = vl_get_cpu_time() ;
switch (self->initialization) {
case VlKMeansRandomSelection :
vl_kmeans_init_centers_with_rand_data (self,
data, dimension, numData,
numCenters) ;
break ;
case VlKMeansPlusPlus :
vl_kmeans_init_centers_plus_plus (self,
data, dimension, numData,
numCenters) ;
break ;
default:
abort() ;
}
if (self->verbosity) {
VL_PRINTF("kmeans: K-means initialized in %.2f s\n",
vl_get_cpu_time() - timeRef) ;
}
timeRef = vl_get_cpu_time () ;
energy = vl_kmeans_refine_centers (self, data, numData) ;
if (self->verbosity) {
VL_PRINTF("kmeans: K-means terminated in %.2f s with energy %g\n",
vl_get_cpu_time() - timeRef, energy) ;
}
/* copy centers to output if current solution is optimal */
/* check repetition == 0 as well in case energy = NaN, which */
/* can happen if the data contain NaNs */
if (energy < bestEnergy || repetition == 0) {
void * temp ;
bestEnergy = energy ;
if (bestCenters == NULL) {
bestCenters = vl_malloc(vl_get_type_size(self->dataType) *
self->dimension *
self->numCenters) ;
}
/* swap buffers */
temp = bestCenters ;
bestCenters = self->centers ;
self->centers = temp ;
} /* better energy */
} /* next repetition */
vl_free (self->centers) ;
self->centers = bestCenters ;
return bestEnergy ;
}
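/* ------------------------------------------------------------------
 * A minimal usage sketch (not part of the library; vl_kmeans_new,
 * vl_kmeans_delete, VlDistanceL2 and VlKMeansLloyd are assumed from the
 * public VLFeat API, and all sizes/values are hypothetical):
 *
 *   float data [100 * 2] ;                    // 100 points, 2 dimensions
 *   // ... fill data ...
 *   VlKMeans * kmeans = vl_kmeans_new (VL_TYPE_FLOAT, VlDistanceL2) ;
 *   vl_kmeans_set_algorithm (kmeans, VlKMeansLloyd) ;
 *   vl_kmeans_set_initialization (kmeans, VlKMeansPlusPlus) ;
 *   vl_kmeans_set_num_repetitions (kmeans, 5) ;
 *   double energy = vl_kmeans_cluster (kmeans, data, 2, 100, 10) ;
 *   vl_uint32 assignments [100] ;
 *   vl_kmeans_quantize (kmeans, assignments, NULL, data, 100) ;
 *   vl_kmeans_delete (kmeans) ;
 */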
/* VL_KMEANS_INSTANTIATING */
#endif
#undef SFX
#undef TYPE
#undef FLT
#undef VL_KMEANS_INSTANTIATING
|
resize-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file resize-inl.h
 * \brief image resize operator using opencv; the GPU path only supports bilinear resize
* \author Jake Lee
*/
#ifndef MXNET_OPERATOR_IMAGE_RESIZE_INL_H_
#define MXNET_OPERATOR_IMAGE_RESIZE_INL_H_
#include <mxnet/base.h>
#include <vector>
#include "../mxnet_op.h"
#include "../operator_common.h"
#include "image_utils.h"
#if MXNET_USE_OPENCV
#include <opencv2/opencv.hpp>
#endif // MXNET_USE_OPENCV
namespace mxnet {
namespace op {
namespace image {
using namespace mshadow;
#if MXNET_USE_CUDA
template<typename DType, typename T, typename Acctype>
void ResizeImplCUDA(Stream<gpu> *s,
const T input,
const T output);
#endif // MXNET_USE_CUDA
struct ResizeParam : public dmlc::Parameter<ResizeParam> {
mxnet::Tuple<int> size;
bool keep_ratio;
int interp;
DMLC_DECLARE_PARAMETER(ResizeParam) {
DMLC_DECLARE_FIELD(size)
.set_default(mxnet::Tuple<int>())
.describe("Size of new image. Could be (width, height) or (size)");
DMLC_DECLARE_FIELD(keep_ratio)
.describe("Whether to resize the short edge or both edges to `size`, "
"if size is give as an integer.")
.set_default(false);
DMLC_DECLARE_FIELD(interp)
.set_default(1)
.describe("Interpolation method for resizing. By default uses bilinear interpolation"
"Options are INTER_NEAREST - a nearest-neighbor interpolation"
"INTER_LINEAR - a bilinear interpolation"
"INTER_AREA - resampling using pixel area relation"
"INTER_CUBIC - a bicubic interpolation over 4x4 pixel neighborhood"
"INTER_LANCZOS4 - a Lanczos interpolation over 8x8 pixel neighborhood"
"Note that the GPU version only support bilinear interpolation(1)"
" and the result on cpu would be slightly different from gpu."
"It uses opencv resize function which tend to align center on cpu"
"while using contrib.bilinearResize2D which aligns corner on gpu");
}
};
// handle the keep ratio param
inline SizeParam GetHeightAndWidth(int data_h,
int data_w,
const ResizeParam& param) {
CHECK((param.size.ndim() == 1) || (param.size.ndim() == 2))
<< "Input size dimension must be 1 or 2, but got "
<< param.size.ndim();
int resized_h;
int resized_w;
if (param.size.ndim() == 1) {
CHECK_GT(param.size[0], 0)
<< "Input size should be greater than 0, but got "
<< param.size[0];
if (!param.keep_ratio) {
resized_h = param.size[0];
resized_w = param.size[0];
} else {
if (data_h > data_w) {
resized_w = param.size[0];
resized_h = static_cast<int>(data_h * resized_w / data_w);
} else {
resized_h = param.size[0];
resized_w = static_cast<int>(data_w * resized_h / data_h);
}
}
} else {
CHECK_GT(param.size[0], 0)
<< "Input width should be greater than 0, but got "
<< param.size[0];
CHECK_GT(param.size[1], 0)
<< "Input height should be greater than 0, but got "
<< param.size[1];
resized_h = param.size[1];
resized_w = param.size[0];
}
return SizeParam(resized_h, resized_w);
}
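// Worked example (hypothetical sizes): for a 480x640 (h x w) image with
// size=(300) and keep_ratio=true, data_h < data_w, so the short edge gives
// resized_h = 300 and resized_w = 640 * 300 / 480 = 400; with
// keep_ratio=false both edges become 300.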
inline bool ResizeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
// input attrs should only be (h, w, c) or (n, h, w, c)
CHECK((in_attrs->at(0).ndim() == 3U) || (in_attrs->at(0).ndim() == 4U))
<< "Input image dimension should be 3 or 4 but got "
<< in_attrs->at(0).ndim();
const auto& ishape = (*in_attrs)[0];
const ResizeParam& param = nnvm::get<ResizeParam>(attrs.parsed);
SizeParam size;
if (ishape.ndim() == 3) {
size = GetHeightAndWidth(ishape[H], ishape[W], param);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({size.height, size.width, ishape[C]}));
} else {
size = GetHeightAndWidth(ishape[kH], ishape[kW], param);
SHAPE_ASSIGN_CHECK(*out_attrs, 0,
mxnet::TShape({ishape[N], size.height, size.width, ishape[kC]}));
}
return true;
}
inline void ResizeImpl(const std::vector<TBlob> &inputs,
const std::vector<TBlob> &outputs,
const int height,
const int width,
const int interp,
const int input_index = 0,
const int output_index = 0) {
#if MXNET_USE_OPENCV
CHECK_NE(inputs[0].type_flag_, mshadow::kFloat16) << "opencv image mat doesn't support fp16";
CHECK((inputs[0].type_flag_ != mshadow::kInt32) && (inputs[0].type_flag_ != mshadow::kInt64))
<< "opencv resize doesn't support int32, int64";
// map the mshadow type flag (kFloat32, kFloat64, kFloat16, kUint8, kInt32)
// to the corresponding opencv matrix element type; fp16 (-1) is rejected above
const int DTYPE[] = {CV_32F, CV_64F, -1, CV_8U, CV_32S};
if (inputs[0].ndim() == 3) {
const int cv_type = CV_MAKETYPE(DTYPE[inputs[0].type_flag_], inputs[0].shape_[C]);
cv::Mat buf(inputs[0].shape_[H], inputs[0].shape_[W], cv_type, inputs[0].dptr_);
cv::Mat dst(outputs[0].shape_[H], outputs[0].shape_[W], cv_type, outputs[0].dptr_);
cv::resize(buf, dst, cv::Size(width, height), 0, 0, interp);
CHECK(!dst.empty());
CHECK_EQ(static_cast<void*>(dst.ptr()), outputs[0].dptr_);
} else {
const int cv_type = CV_MAKETYPE(DTYPE[inputs[0].type_flag_], inputs[0].shape_[kC]);
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
cv::Mat buf(inputs[0].shape_[kH], inputs[0].shape_[kW], cv_type,
inputs[0].dptr<DType>() + input_index);
cv::Mat dst(outputs[0].shape_[kH], outputs[0].shape_[kW], cv_type,
outputs[0].dptr<DType>() + output_index);
cv::resize(buf, dst, cv::Size(width, height), 0, 0, interp);
CHECK(!dst.empty());
CHECK_EQ(static_cast<void*>(dst.ptr()), outputs[0].dptr<DType>() + output_index);
});
}
#else
LOG(FATAL) << "Build with USE_OPENCV=1 for image resize operator.";
#endif // MXNET_USE_OPENCV
}
template <typename xpu>
inline void Resize(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
CHECK_EQ(outputs.size(), 1U);
const ResizeParam& param = nnvm::get<ResizeParam>(attrs.parsed);
SizeParam size;
if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
CHECK(param.interp == 1) << "interp should be 1 for using Resize on GPU.";
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
if (inputs[0].ndim() == 3) {
Tensor<gpu, 3, DType> input = inputs[0].get<gpu, 3, DType>(s);
Tensor<gpu, 3, DType> output = outputs[0].get<gpu, 3, DType>(s);
ResizeImplCUDA<DType, Tensor<gpu, 3, DType>, float>(s, input, output);
} else {
Tensor<gpu, 4, DType> input = inputs[0].get<gpu, 4, DType>(s);
Tensor<gpu, 4, DType> output = outputs[0].get<gpu, 4, DType>(s);
ResizeImplCUDA<DType, Tensor<gpu, 4, DType>, float>(s, input, output);
}
});
#endif // MXNET_USE_CUDA
} else if (inputs[0].ndim() == 3) {
size = GetHeightAndWidth(inputs[0].shape_[H], inputs[0].shape_[W], param);
ResizeImpl(inputs, outputs, size.height, size.width, param.interp);
} else {
size = GetHeightAndWidth(inputs[0].shape_[kH], inputs[0].shape_[kW], param);
const auto batch_size = inputs[0].shape_[N];
const auto input_step = inputs[0].shape_[kH] * inputs[0].shape_[kW] * inputs[0].shape_[kC];
const auto output_step = size.height * size.width * inputs[0].shape_[kC];
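// each step is the element count of one (h, w, c) image, so batch element i
// starts at offset i * input_step in the input and i * output_step in the output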
#pragma omp parallel for
for (auto i = 0; i < batch_size; ++i) {
ResizeImpl(inputs, outputs, size.height, size.width,
param.interp, i * input_step, i * output_step);
}
}
}
} // namespace image
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_IMAGE_RESIZE_INL_H_
|
GB_unop__erf_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__erf_fp32_fp32)
// op(A') function: GB (_unop_tran__erf_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = erff (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = erff (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = erff (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ERF || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__erf_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = erff (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = erff (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__erf_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
kmedian.h | #pragma once
#ifndef FGC_KMEDIAN_H__
#define FGC_KMEDIAN_H__
#include "minicore/optim/kmeans.h"
#include "minicore/util/csc.h"
#include <algorithm>
namespace minicore {
namespace coresets {
using namespace blz;
template<typename VT, bool TF, typename FT, typename IT>
static INLINE void __assign(blaze::DenseVector<VT, TF> &vec, IT ind, FT val) {
(*vec)[ind] = val;
}
#if 1
template<typename VT, bool TF, typename FT, typename IT>
static INLINE void __assign(blaze::SparseVector<VT, TF> &vec, IT ind, FT val) {
static_assert(std::is_integral_v<IT>, "Sanity1");
static_assert(std::is_arithmetic_v<FT>, "Sanity2");
auto &rr = *vec;
if(val != FT(0.)) {
if(rr.capacity() <= rr.nonZeros() + 1)
rr.reserve(std::max((rr.nonZeros() + 1) << 1, size_t(4)));
rr.append(ind, val);
}
}
#else
template<typename VT, bool TF, typename FT, typename IT>
static INLINE void __assign(blaze::SparseVector<VT, TF> &vec, IT ind, FT val) {
(*vec).set(ind, val);
}
#endif
namespace detail {
struct IndexCmp {
template<typename T>
bool operator()(const T x, const T y) const {return x->index() > y->index();}
template<typename T, typename IT>
bool operator()(const std::pair<T, IT> x, const std::pair<T, IT> y) const {
return this->operator()(x.first, y.first);
}
};
template<typename CI, typename IT=uint32_t>
struct IndexPQ: public std::priority_queue<std::pair<CI, IT>, std::vector<std::pair<CI, IT>>, IndexCmp> {
IndexPQ(size_t nelem) {
this->c.reserve(nelem);
}
auto &getc() {return this->c;}
const auto &getc() const {return this->c;}
auto getsorted() const {
auto tmp = getc();
std::fprintf(stderr, "pq size: %zu\n", tmp.size());
std::sort(tmp.begin(), tmp.end(), this->comp);
return tmp;
}
};
} // namespace detail
template<typename MT, bool SO, typename VT, bool TF>
void sparse_l1_unweighted_median(const blz::SparseMatrix<MT, SO> &data, blz::Vector<VT, TF> &ret) {
if((*data).rows() == 1) {
*ret = row(*data, 0);
return;
}
using FT = blaze::ElementType_t<MT>;
auto &ctr = *ret;
if constexpr(blaze::IsSparseVector_v<VT>) {
ctr.reset();
}
using CI = typename MT::ConstIterator;
const size_t nd = (*data).columns(), nr = (*data).rows(), hlf = nr / 2, odd = nr & 1;
detail::IndexPQ<CI, uint32_t> pq(nr);
std::unique_ptr<CI[]> ve(new CI[nr]);
for(unsigned i = 0; i < nr; ++i) {
auto r(row(*data, i));
pq.push(std::pair<CI, uint32_t>(r.begin(), i));
ve[i] = r.end();
}
assert(pq.size() == (*data).rows());
uint32_t cid = 0;
std::vector<FT> vals;
assert(pq.empty() || pq.top().first->index() == std::min_element(pq.getc().begin(), pq.getc().end(), [](auto x, auto y) {return x.first->index() < y.first->index();})->first->index());
// Setting all to 0 lets us simply skip elements with the wrong number of nonzeros.
while(pq.size()) {
//std::fprintf(stderr, "Top index: %zu\n", pq.top().first->index());
if constexpr(!blaze::IsSparseVector_v<VT>) {
while(cid < pq.top().first->index())
__assign(ctr, cid++, 0);
if(unlikely(cid > pq.top().first->index())) {
std::fprintf(stderr, "cid: %u. top index: %zu\n", cid, pq.top().first->index());
std::exit(1);
#if 0
auto pqs = pq.getsorted();
for(const auto v: pqs) std::fprintf(stderr, "%zu:%g\n", v.first->index(), v.first->value());
std::exit(1);
#endif
}
} else cid = pq.top().first->index();
while(pq.top().first->index() == cid) {
auto pair = pq.top();
pq.pop();
vals.push_back(pair.first->value());
if(++pair.first != ve[pair.second]) {
pq.push(pair);
} else if(pq.empty()) break;
}
const size_t vsz = vals.size();
FT val;
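// vals now holds the stored nonzeros of column cid; the other nr - vsz
// entries are implicit zeros. Treating those zeros as the smallest entries
// (exact for non-negative data), a column with at most hlf stored values has
// median 0; otherwise the median sits at idx = vsz - nr/2 - 1 of the sorted
// nonzeros.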
if(vsz <= hlf) { /* vsz == hlf would make idx below index vals[-1] */
val = 0.;
} else {
shared::sort(vals.data(), vals.data() + vals.size());
const size_t idx = vals.size() - nr / 2 - 1;
val = odd ? vals[idx]: (vals[idx] + vals[idx + 1]) * FT(.5);
}
__assign(ctr, cid, val);
++cid;
vals.clear();
}
if constexpr(blaze::IsDenseVector_v<VT>) {
while(cid < nd) ctr[cid++] = 0;
}
}
template<typename MT, bool SO, typename VT, bool TF>
void l1_unweighted_median(const blz::Matrix<MT, SO> &data, blz::Vector<VT, TF> &ret) {
#if 0
if constexpr(blz::IsSparseMatrix_v<MT>) {
sparse_l1_unweighted_median(*data, ret);
return;
}
#endif
//std::fprintf(stderr, "%s unweighted l1 median. data shape: %zu/%zu. Return shape: %zu\n", blaze::IsDenseMatrix_v<MT> ? "Dense": "Sparse", (*data).rows(), (*data).columns(), (*ret).size());
assert((*ret).size() == (*data).columns());
auto &rr(*ret);
const auto &dr(*data);
const bool odd = dr.rows() % 2;
const size_t hlf = dr.rows() / 2;
blaze::DynamicVector<ElementType_t<MT>, blaze::columnVector> dv;
if constexpr(blaze::IsSparseVector_v<VT>) {
(*ret).reset();
}
for(size_t i = 0; i < dr.columns(); ++i) {
dv = column(dr, i);
// Should do fast copying.
shared::sort(dv.begin(), dv.end());
auto val = odd ? dv[hlf]: ElementType_t<MT>(.5) * (dv[hlf - 1] + dv[hlf]);
#if 0
std::fprintf(stderr, "val %g at %zu\n", val, i);
#endif
__assign(rr, i, val);
assert(rr[i] == val || !std::fprintf(stderr, "rr[i] %g vs %g\n", double(rr[i]), val));
}
}
template<typename MT, bool SO, typename VT, bool TF, typename Rows>
void l1_unweighted_median(const blz::Matrix<MT, SO> &_data, const Rows &rs, blz::Vector<VT, TF> &ret) {
assert((*ret).size() == (*_data).columns());
auto &rr(*ret);
const auto &dr(*_data);
const bool odd = rs.size() % 2;
const size_t hlf = rs.size() / 2;
const size_t nc = dr.columns();
blaze::DynamicMatrix<ElementType_t<MT>, SO> tmpind;
if constexpr(blaze::IsSparseVector_v<VT>) {
(*ret).reset();
}
size_t i;
for(i = 0; i < nc;) {
const unsigned nr = std::min(size_t(8), nc - i);
// transpose columns [i, i + nr) of the selected rows into row-major scratch
tmpind = trans(blaze::submatrix(blaze::rows(dr, rs.data(), rs.size()), 0, i, rs.size(), nr));
for(unsigned j = 0; j < nr; ++j) {
auto r(blaze::row(tmpind, j));
shared::sort(r.begin(), r.end());
__assign(rr, i + j, odd ? r[hlf]: ElementType_t<MT>(0.5) * (r[hlf - 1] + r[hlf]));
}
i += nr;
}
}
#if 0
template<typename DataT, typename IndicesT, typename IndPtrT, typename VT2, bool TF2, typename IT=uint32_t, typename WT>
static inline void l1_median(const util::CSparseMatrix<DataT, IndicesT, IndPtrT> &data, blz::Vector<VT2, TF2> &ret, const IT *indices=(const IT *)nullptr, size_t nasn=0, const WT *weights=static_cast<WT *>(nullptr)) {
const size_t nc = data.columns();
if((*ret).size() != nc) {
(*ret).resize(nc);
}
(*ret).reset();
if(unlikely((*data).columns() > ((uint64_t(1) << (sizeof(IT) * CHAR_BIT)) - 1)))
throw std::runtime_error("Use a different index type, there are more features than fit in IT");
const size_t npoints = indices ? nasn: (*data).rows();
if(!npoints) throw std::invalid_argument("Can't take the median of no points");
using FT = blaze::CommonType_t<DataT, blz::ElementType_t<VT2>, std::decay_t<blz::ElementType_t<WT>>>;
std::vector<std::vector<std::pair<DataT, IT>>> pairs(nc); // One list of pairs per column
for(auto &p: pairs) p.reserve(npoints);
#ifdef _OPENMP
int nt;
#pragma omp parallel
{
nt = omp_get_num_threads();
}
auto mutexes = std::make_unique<std::mutex[]>(nt);
OMP_PFOR
#endif
for(size_t i = 0; i < npoints; ++i) {
size_t j = 0;
const auto rowind = indices ? size_t(indices[i]): i;
const std::pair<DataT, IT> empty(0, rowind);
auto crow = row(data, rowind);
auto cbeg = crow.begin(), cend = crow.end();
for(;cbeg != cend;++cbeg, ++j) {
const auto ind = cbeg->index();
while(j < ind) {
OMP_ONLY(std::lock_guard<std::mutex> lock(mutexes[j]);)
pairs[j++].push_back(empty);
}
OMP_ONLY(std::lock_guard<std::mutex> lock(mutexes[ind]);)
pairs[ind].push_back(std::pair<DataT, IT>(cbeg->value(), rowind));
++cbeg;
}
while(j < nc) {
OMP_ONLY(std::lock_guard<std::mutex> lock(mutexes[j]);)
pairs[j++].push_back(empty);
}
}
// First, compute sorted pairs
// Then find median for each column
OMP_PFOR_DYN
for(size_t i = 0; i < nc; ++i) {
auto &cpairs = pairs[i];
shared::sort(cpairs.begin(), cpairs.end());
FT wsum = 0., maxw = -std::numeric_limits<FT>::max();
IT maxind = -0;
for(size_t j = 0; j < npoints; ++j) {
double neww = 1.;
if(weights) neww = (*weights)[cpairs[j].second];
wsum += neww;
if(neww > maxw) maxw = neww, maxind = j;
}
if(maxw > wsum * .5) {
// Return the value of the tuple with maximum weight
__assign(*ret, i, cpairs[maxind].first);
continue;
}
FT mid = wsum * .5;
auto it = std::lower_bound(cpairs.begin(), cpairs.end(), mid,
[](std::pair<DataT, IT> x, FT y)
{
return x.first < y;
});
OMP_CRITICAL {
__assign(*ret, i, it->first == mid ? FT(.5 * (it->first + it[1].first)): FT(it[1].first));
}
}
}
#endif
template<typename MT, bool SO, typename VT2, bool TF2, typename IT=uint32_t, typename WT>
static inline void weighted_median(const blz::Matrix<MT, SO> &data, blz::Vector<VT2, TF2> &ret, const WT &weights) {
const size_t nc = (*data).columns();
if((*ret).size() != nc) {
(*ret).resize(nc);
}
if constexpr(blaze::IsSparseVector_v<VT2>) {
(*ret).reset();
}
if(unlikely((*data).columns() > ((uint64_t(1) << (sizeof(IT) * CHAR_BIT)) - 1)))
throw std::runtime_error("Use a different index type, there are more features than fit in IT");
const size_t nr = (*data).rows();
auto pairs = std::make_unique<std::pair<ElementType_t<MT>, IT>[]>(nr);
using FT = blaze::CommonType_t<blz::ElementType_t<MT>, blz::ElementType_t<VT2>, std::decay_t<decltype(weights[0])>>;
for(size_t i = 0; i < nc; ++i) {
auto col = column(*data, i);
for(size_t j = 0; j < nr; ++j)
pairs[j] = {col[j], j};
shared::sort(pairs.get(), pairs.get() + nr);
FT wsum = 0., maxw = -std::numeric_limits<FT>::max();
IT maxind = -0;
    for(size_t j = 0; j < nr; ++j) {
        auto neww = weights[pairs[j].second];
        wsum += neww; // accumulate total weight so the mid = wsum * .5 test below is meaningful
        if(neww > maxw) maxw = neww, maxind = j;
    }
if(maxw > wsum * .5) {
// Return the value of the tuple with maximum weight
__assign(*ret, i, pairs[maxind].first);
continue;
}
FT mid = wsum * .5;
auto it = std::lower_bound(pairs.get(), pairs.get() + nr, mid,
[](std::pair<ElementType_t<MT>, IT> x, FT y)
{
return x.first < y;
});
(*ret)[i] = it->first == mid ? FT(.5 * (it->first + it[1].first)): FT(it[1].first);
}
}
template<typename MT, bool SO, typename VT, bool TF, typename VT3=blz::CommonType_t<ElementType_t<MT>, ElementType_t<VT>>, typename=std::enable_if_t<std::is_arithmetic_v<VT3>>>
void l1_median(const blz::Matrix<MT, SO> &data, blz::Vector<VT, TF> &ret, const VT3 *weights=static_cast<VT3 *>(nullptr)) {
if(weights)
weighted_median(data, ret, weights);
else
l1_unweighted_median(data, ret);
}
template<typename MT, bool SO, typename VT, bool TF, typename VT3, typename=std::enable_if_t<!std::is_arithmetic_v<VT3> && !std::is_pointer_v<VT3>>>
void l1_median(const blz::Matrix<MT, SO> &data, blz::Vector<VT, TF> &ret, const VT3 &weights) {
weighted_median(data, ret, weights);
}
template<typename MT, bool SO, typename VT, bool TF, typename Rows, typename VT3=blz::CommonType_t<ElementType_t<MT>, ElementType_t<VT>>, typename=std::enable_if_t<blaze::IsRows_v<Rows>>>
void l1_median(const blz::Matrix<MT, SO> &data, blz::Vector<VT, TF> &ret, const Rows &rows, const VT3 *weights=static_cast<VT3 *>(nullptr)) {
if(weights) {
auto dr(blaze::rows(data, rows.data(), rows.size()));
const blz::CustomVector<VT3, blaze::unaligned, blaze::unpadded> cv((VT3 *)weights, (*data).rows());
blz::DynamicVector<VT3> selected_weights(blaze::elements(cv, rows.data(), rows.size()));
weighted_median(dr, ret, selected_weights.data());
} else l1_unweighted_median(data, rows, ret);
}
template<typename MT, bool SO, typename VT, bool TF, typename IT=uint64_t, typename WeightT=blz::DV<double>>
void l1_median(const blaze::Matrix<MT, SO> &data, blz::Vector<VT, TF> &ret, IT *asp, size_t nasn=0, const WeightT *weights=static_cast<WeightT *>(nullptr)) {
if(!asp) {
if(weights) {
weighted_median(data, ret, *weights);
} else {
l1_unweighted_median(data, ret);
}
} else {
if(weights) {
weighted_median(rows(*data, asp, nasn), ret, *weights);
} else {
l1_unweighted_median(rows(*data, asp, nasn), ret);
}
}
}
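// A minimal usage sketch (hypothetical shapes; assumes the blaze types used
// throughout this header):
//
//   blaze::DynamicMatrix<double> pts(100, 16);              // 100 points, 16 features
//   blaze::DynamicVector<double, blaze::rowVector> med(16); // output median
//   // ... fill pts ...
//   l1_median(pts, med);   // no weights: dispatches to l1_unweighted_median
//
// Passing a weight vector (or pointer) instead routes through weighted_median.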
template<typename MT, bool SO, typename VT, bool TF, typename IT=uint64_t, typename WeightT=blz::DV<double>, typename RSums>
void tvd_median(const blaze::Matrix<MT, SO> &data, blz::Vector<VT, TF> &ret, IT *asp, size_t nasn=0, const WeightT *weights=static_cast<WeightT *>(nullptr), const RSums &rsums=RSums()) {
return l1_median(*data % blaze::expand(1. / rsums, (*data).columns()), ret, asp, nasn, weights);
}
} // namespace coresets
} // namespace minicore
#endif /* FGC_KMEDIAN_H__ */
|
GB_unop__log2_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__log2_fc64_fc64
// op(A') function: GB_unop_tran__log2_fc64_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_clog2 (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_clog2 (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_clog2 (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG2 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__log2_fc64_fc64
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_clog2 (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__log2_fc64_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
AlloyDenseSolve.h | /*
* Copyright(C) 2015, Blake C. Lucas, Ph.D. (img.science@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef ALLOYDENSESOLVER_H_
#define ALLOYDENSESOLVER_H_
#include "AlloyMath.h"
#include "AlloyImage.h"
#include "AlloyDenseMatrix.h"
namespace aly {
bool SANITY_CHECK_DENSE_SOLVE();
bool SANITY_CHECK_ROBUST_SOLVE();
void PoissonBlend(const Image4f& in, Image4f& out, int iterations, int levels,float lambda = 0.99f, const std::function<bool(int,int)>& iterationMonitor = nullptr);
void PoissonBlend(const Image4f& in, Image4f& out, int iterations,float lambda = 0.99f, const std::function<bool(int)>& iterationMonitor = nullptr);
void PoissonBlend(const Image2f& in, Image2f& out, int iterations, int levels,float lambda = 0.99f, const std::function<bool(int,int)>& iterationMonitor = nullptr);
void PoissonBlend(const Image2f& in, Image2f& out, int iterations,float lambda = 0.99f, const std::function<bool(int)>& iterationMonitor = nullptr);
void PoissonInpaint(const Image4f& source, const Image4f& target, Image4f& out,int iterations, int levels, float lambda = 0.99f , const std::function<bool(int, int)>& iterationMonitor = nullptr);
void PoissonInpaint(const Image4f& source, const Image4f& target, Image4f& out,int iterations, float lambda = 0.99f, const std::function<bool(int)>& iterationMonitor = nullptr);
void PoissonInpaint(const Image2f& source, const Image2f& target, Image2f& out,int iterations, int levels, float lambda = 0.99f, const std::function<bool(int, int)>& iterationMonitor = nullptr);
void PoissonInpaint(const Image2f& source, const Image2f& target, Image2f& out,int iterations, float lambda = 0.99f, const std::function<bool(int)>& iterationMonitor = nullptr);
void LaplaceFill(const Image4f& sourceImg, Image4f& targetImg, int iterations,int levels, float lambda = 0.99f, const std::function<bool(int, int)>& iterationMonitor=nullptr);
void LaplaceFill(const Image4f& sourceImg, Image4f& targetImg, int iterations,float lambda = 0.99f , const std::function<bool(int)>& iterationMonitor=nullptr);
void LaplaceFill(const Image2f& sourceImg, Image2f& targetImg, int iterations,float lambda = 0.99f, const std::function<bool(int)>& iterationMonitor = nullptr);
void LaplaceFill(const Image2f& sourceImg, Image2f& targetImg, int iterations,int levels, float lambda = 0.99f, const std::function<bool(int, int)>& iterationMonitor = nullptr);
/******************************************************************************
* XLISP-STAT 2.1 Copyright (c) 1990, by Luke Tierney
* XLISP version 2.1, Copyright (c) 1989, by David Betz.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Luke Tierney and David Betz not be
* used in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. Luke Tierney and David Betz
* make no representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied warranty.
*
* LUKE TIERNEY AND DAVID BETZ DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
* IN NO EVENT SHALL LUKE TIERNEY NOR DAVID BETZ BE LIABLE FOR ANY SPECIAL,
* INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*
* XLISP-STAT AUTHOR:
* Luke Tierney
* School of Statistics
* University of Minnesota
* Minneapolis, MN 55455
* (612) 625-7843
*
* Email Address:
* internet: luke@umnstat.stat.umn.edu
*
* XLISP AUTHOR:
* David Betz
* P.O. Box 144
* Peterborough, NH 03458
* (603) 924-4145
******************************************************************************
* XLISP-STAT 2.1 was ported to the Amiga by
* J.K. Lindsey
* Faculty of Economic, Business and Social Sciences,
* University of Liege,
* Sart Tilman B31,
* 4000 Liege,
* Belgium
* 32-41-56.29.64
*
* The above permission and disclaimer also applies to all of the specifically
* Amiga portions of this software, with the restriction that the Amiga
* version not be used for any military-related applications.
******************************************************************************
*/
template<class T, int C> void SVD(const DenseMatrix<T, C>& M,
DenseMatrix<T, C>& U, DenseMatrix<T, C>& D, DenseMatrix<T, C>& Vt,
double zeroTolerance = 0) {
const int m = M.rows;
const int n = M.cols;
std::vector<std::vector<double>> v(n, std::vector<double>(n));
std::vector<std::vector<double>> u(m, std::vector<double>(m));
std::vector<double> w(n);
std::vector<double> rv1(n);
int flag, i, its, j, jj, k, l, nm;
double c, f, h, s, x, y, z;
double anorm, g, scale;
U.resize(m, m);
Vt.resize(n, n);
D.resize(m, n);
for (int cc = 0; cc < C; cc++) {
anorm = 0.0, g = 0.0, scale = 0.0;
if (m < n) {
throw std::runtime_error(
"SVD error, rows must be greater than or equal to cols.");
}
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
u[i][j] = (double)M[i][j][cc];
}
}
for (i = 0; i < n; i++) {
l = i + 1;
rv1[i] = scale * g;
g = s = scale = 0.0;
if (i < m) {
for (k = i; k < m; k++)
scale += std::abs((double)u[k][i]);
if (scale > zeroTolerance) {
for (k = i; k < m; k++) {
u[k][i] = ((double)u[k][i] / scale);
s += ((double)u[k][i] * (double)u[k][i]);
}
f = (double)u[i][i];
g = -sign(std::sqrt(s), f);
h = f * g - s;
u[i][i] = (f - g);
if (i != n - 1) {
for (j = l; j < n; j++) {
for (s = 0.0, k = i; k < m; k++)
s += ((double)u[k][i] * (double)u[k][j]);
f = s / h;
for (k = i; k < m; k++)
u[k][j] += (f * (double)u[k][i]);
}
}
for (k = i; k < m; k++)
u[k][i] = ((double)u[k][i] * scale);
}
}
w[i] = (scale * g);
g = s = scale = 0.0;
if (i < m && i != n - 1) {
for (k = l; k < n; k++)
scale += std::abs((double)u[i][k]);
if (scale > zeroTolerance) {
for (k = l; k < n; k++) {
u[i][k] = ((double)u[i][k] / scale);
s += ((double)u[i][k] * (double)u[i][k]);
}
f = (double)u[i][l];
g = -sign(std::sqrt(s), f);
h = f * g - s;
u[i][l] = (f - g);
for (k = l; k < n; k++)
rv1[k] = (double)u[i][k] / h;
if (i != m - 1) {
for (j = l; j < m; j++) {
for (s = 0.0, k = l; k < n; k++)
s += ((double)u[j][k] * (double)u[i][k]);
for (k = l; k < n; k++)
u[j][k] += (s * rv1[k]);
}
}
for (k = l; k < n; k++)
u[i][k] = ((double)u[i][k] * scale);
}
}
anorm = aly::max(anorm,
(std::abs((double)w[i]) + std::abs(rv1[i])));
}
for (i = n - 1; i >= 0; i--) {
if (i < n - 1) {
if (std::abs(g) > zeroTolerance) {
for (j = l; j < n; j++)
v[j][i] = (((double)u[i][j] / (double)u[i][l]) / g);
for (j = l; j < n; j++) {
for (s = 0.0, k = l; k < n; k++)
s += ((double)u[i][k] * (double)v[k][j]);
for (k = l; k < n; k++)
v[k][j] += (s * (double)v[k][i]);
}
}
for (j = l; j < n; j++)
v[i][j] = v[j][i] = 0.0;
}
v[i][i] = 1.0;
g = rv1[i];
l = i;
}
for (i = n - 1; i >= 0; i--) {
l = i + 1;
g = (double)w[i];
if (i < n - 1)
for (j = l; j < n; j++)
u[i][j] = 0.0;
if (std::abs(g) > zeroTolerance) {
g = 1.0 / g;
if (i != n - 1) {
for (j = l; j < n; j++) {
for (s = 0.0, k = l; k < m; k++)
s += ((double)u[k][i] * (double)u[k][j]);
f = (s / (double)u[i][i]) * g;
for (k = i; k < m; k++)
u[k][j] += (f * (double)u[k][i]);
}
}
for (j = i; j < m; j++)
u[j][i] = ((double)u[j][i] * g);
}
else {
for (j = i; j < m; j++)
u[j][i] = 0.0;
}
++u[i][i];
}
for (k = n - 1; k >= 0; k--) {
for (its = 0; its < 30; its++) {
flag = 1;
for (l = k; l >= 0; l--) {
nm = l - 1;
if (std::abs(rv1[l]) + anorm == anorm) {
flag = 0;
break;
}
if (nm >= 0 && std::abs((double)w[nm]) + anorm == anorm) {
break;
}
}
if (flag) {
c = 0.0;
s = 1.0;
for (i = l; i <= k; i++) {
f = s * rv1[i];
if (std::abs(f) + anorm != anorm) {
g = (double)w[i];
h = pythag(f, g);
w[i] = h;
h = 1.0 / h;
c = g * h;
s = (-f * h);
for (j = 0; j < m; j++) {
y = (double)u[j][nm];
z = (double)u[j][i];
u[j][nm] = (y * c + z * s);
u[j][i] = (z * c - y * s);
}
}
}
}
z = (double)w[k];
if (l == k) {
if (z < 0.0) {
w[k] = (-z);
for (j = 0; j < n; j++)
v[j][k] = (-v[j][k]);
}
int iii, jjj;
for (iii = k; (iii < n - 1) && (w[iii] < w[iii + 1]);
iii++) {
std::swap(w[iii], w[iii + 1]);
for (jjj = 0; jjj < m; jjj++)
std::swap(u[jjj][iii], u[jjj][iii + 1]);
for (jjj = 0; jjj < n; jjj++)
std::swap(v[jjj][iii], v[jjj][iii + 1]);
}
break;
}
				if (its == 29) {
					// the convergence test above failed on the final allowed sweep
					throw std::runtime_error("SVD did not converge.");
				}
x = (double)w[l];
nm = k - 1;
y = (double)w[nm];
g = rv1[nm];
h = rv1[k];
f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y);
g = pythag(f, 1.0);
f = ((x - z) * (x + z) + h * ((y / (f + sign(g, f))) - h)) / x;
c = s = 1.0;
for (j = l; j <= nm; j++) {
i = j + 1;
g = rv1[i];
y = (double)w[i];
h = s * g;
g = c * g;
z = pythag(f, h);
rv1[j] = z;
c = f / z;
s = h / z;
f = x * c + g * s;
g = g * c - x * s;
h = y * s;
y = y * c;
for (jj = 0; jj < n; jj++) {
x = (double)v[jj][j];
z = (double)v[jj][i];
v[jj][j] = (x * c + z * s);
v[jj][i] = (z * c - x * s);
}
z = pythag(f, h);
w[j] = z;
if (z) {
z = 1.0 / z;
c = f * z;
s = h * z;
}
f = (c * g) + (s * y);
x = (c * y) - (s * g);
for (jj = 0; jj < m; jj++) {
y = (double)u[jj][j];
z = (double)u[jj][i];
u[jj][j] = (y * c + z * s);
u[jj][i] = (z * c - y * s);
}
}
rv1[l] = 0.0;
rv1[k] = f;
w[k] = x;
}
}
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
if (i == j) {
D[i][j][cc] = (T)w[j];
}
else {
D[i][j][cc] = T(0);
}
}
}
for (int i = 0; i < m; i++) {
for (int j = 0; j < m; j++) {
U[i][j][cc] = (T)u[i][j];
}
}
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
Vt[i][j][cc] = (T)v[j][i];
}
}
}
}
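// On exit (per channel) U is m-by-m, D is m-by-n with the singular values on
// its diagonal in decreasing order, and Vt is n-by-n, so the product
// U * D * Vt should reconstruct M up to round-off.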
template<class T, int C> DenseMatrix<T, C> inverse(const DenseMatrix<T, C>& M,
double zeroTolerance = 0.0) {
if (M.rows != M.cols) {
throw std::runtime_error(MakeString()<<"Could not invert matrix. Rows and columns must agree: [" << M.rows << ", " << M.cols<<"]");
}
DenseMatrix<T, C> U, D, Vt;
SVD(M, U, D, Vt);
int K = aly::min(D.rows, D.cols);
for (int k = 0; k < K; k++) {
vec<double, C> d = vec<double, C>(D[k][k]);
for (int c = 0; c < C; c++) {
if (std::abs(d[c]) > zeroTolerance) {
d[c] = 1.0 / d[c];
}
}
D[k][k] = vec<T, C>(d);
}
return (U * D * Vt).transpose();
}
template<class T, int C> Vector<T, C> SolveSVD(const DenseMatrix<T, C>& A,
const Vector<T, C>& b) {
if (A.rows != (int)b.size()) {
throw std::runtime_error(
MakeString()
<< "Matrix row dimensions and vector length must agree. A=["
<< A.rows << "," << A.cols << "] b=[" << b.size()
<< "]");
}
if (A.rows != A.cols) {
DenseMatrix<T, C> At = A.transpose();
DenseMatrix<T, C> AtA = At * A;
Vector<T, C> Atb = At * b;
return inverse(AtA) * Atb;
}
else {
return inverse(A) * b;
}
}
//Back port of JAMA, the NIST Java matrix package (based on LINPACK/EISPACK). Code is licensed for free use in the public domain. http://math.nist.gov/javanumerics/jama/
/** LU Decomposition.
<P>
For an m-by-n matrix A with m >= n, the LU decomposition is an m-by-n
unit lower triangular matrix L, an n-by-n upper triangular matrix U,
and a permutation vector piv of length m so that A(piv,:) = L*U.
If m < n, then L is m-by-m and U is m-by-n.
<P>
   The LU decomposition with pivoting always exists, even if the matrix is
singular, so the constructor will never fail. The primary use of the
LU decomposition is in the solution of square systems of simultaneous
linear equations. This will fail if isNonsingular() returns false.
*/
template<class T, int C> bool LU(const DenseMatrix<T, C>& A,
DenseMatrix<T, 1>& L, DenseMatrix<T, 1>& U, std::vector<int>& piv,
int cc = 0, const double zeroTolerance = 0.0) {
const int m = A.rows;
const int n = A.cols;
std::vector<std::vector<double>> LU(m, std::vector<double>(n, 0.0));
piv.resize(m);
std::vector<double> LUcolj(m, 0.0);
int pivsign;
double* LUrowi;
L.resize(m, n);
U.resize(n, n);
bool nonSingular = true;
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
LU[i][j] = (double)A[i][j][cc];
}
}
for (int i = 0; i < m; i++) {
piv[i] = i;
}
pivsign = 1;
for (int j = 0; j < n; j++) {
for (int i = 0; i < m; i++) {
LUcolj[i] = LU[i][j];
}
for (int i = 0; i < m; i++) {
LUrowi = &LU[i][0];
int kmax = aly::min(i, j);
double s = 0.0;
for (int k = 0; k < kmax; k++) {
s += LUrowi[k] * LUcolj[k];
}
LUrowi[j] = LUcolj[i] -= s;
}
int p = j;
for (int i = j + 1; i < m; i++) {
if (std::abs(LUcolj[i]) > std::abs(LUcolj[p])) {
p = i;
}
}
if (p != j) {
for (int k = 0; k < n; k++) {
std::swap(LU[p][k], LU[j][k]);
}
std::swap(piv[p], piv[j]);
pivsign = -pivsign;
}
if (j < m && std::abs(LU[j][j]) > zeroTolerance) {
for (int i = j + 1; i < m; i++) {
LU[i][j] /= LU[j][j];
}
}
}
for (int j = 0; j < n; j++) {
if (std::abs(LU[j][j]) <= zeroTolerance) {
nonSingular = false;
break;
}
}
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
if (i > j) {
L[i][j].x = (T)LU[i][j];
}
else if (i == j) {
L[i][j].x = T(1.0);
}
else {
L[i][j].x = T(0.0);
}
}
}
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
if (i <= j) {
U[i][j].x = T(LU[i][j]);
}
else {
U[i][j].x = T(0.0);
}
}
}
return nonSingular;
}
template<class T, int C> Vector<T, C> SolveLU(const DenseMatrix<T, C>& A,
const Vector<T, C>& b) {
if (A.rows != (int)b.size()) {
throw std::runtime_error(
MakeString()
<< "Matrix row dimensions and vector length must agree. A=["
<< A.rows << "," << A.cols << "] b=[" << b.size()
<< "]");
}
if (A.rows != A.cols) {
DenseMatrix<T, C> At = A.transpose();
DenseMatrix<T, C> AtA = At * A;
Vector<T, C> Atb = At * b;
int n = AtA.cols;
Vector<T, C> x(A.cols);
Vector<T, C> y(A.cols);
DenseMatrix<T, 1> L, U;
std::vector<int> piv;
for (int cc = 0; cc < C; cc++) {
bool nonSingular = LU(AtA, L, U, piv, cc);
if (!nonSingular) {
throw std::runtime_error("Matrix is singular.");
}
// Forward solve Ly = b
for (int i = 0; i < n; i++) {
y[i][cc] = Atb[piv[i]][cc];
for (int j = 0; j < i; j++) {
y[i][cc] -= L[i][j].x * y[j][cc];
}
y[i][cc] /= L[i][i].x;
}
// Backward solve Ux = y
for (int i = n - 1; i >= 0; i--) {
x[i][cc] = y[i][cc];
for (int j = i + 1; j < n; j++) {
x[i][cc] -= U[i][j].x * x[j][cc];
}
x[i][cc] /= U[i][i].x;
}
}
return x;
}
else {
int n = A.cols;
Vector<T, C> x(A.cols);
Vector<T, C> y(A.cols);
DenseMatrix<T, 1> L, U;
std::vector<int> piv;
for (int cc = 0; cc < C; cc++) {
bool nonSingular = LU(A, L, U, piv, cc);
if (!nonSingular) {
throw std::runtime_error("Matrix is singular.");
}
// Forward solve Ly = b
for (int i = 0; i < n; i++) {
y[i][cc] = b[piv[i]][cc];
for (int j = 0; j < i; j++) {
y[i][cc] -= L[i][j].x * y[j][cc];
}
y[i][cc] /= L[i][i].x;
}
// Backward solve Ux = y
for (int i = n - 1; i >= 0; i--) {
x[i][cc] = y[i][cc];
for (int j = i + 1; j < n; j++) {
x[i][cc] -= U[i][j].x * x[j][cc];
}
x[i][cc] /= U[i][i].x;
}
}
return x;
}
}
/** QR Decomposition.
<P>
For an m-by-n matrix A with m >= n, the QR decomposition is an m-by-n
orthogonal matrix Q and an n-by-n upper triangular matrix R so that
A = Q*R.
<P>
   The QR decomposition always exists, even if the matrix does not have
full rank, so the constructor will never fail. The primary use of the
QR decomposition is in the least squares solution of nonsquare systems
of simultaneous linear equations. This will fail if isFullRank()
returns false.
*/
template<class T, int C> bool QR(const DenseMatrix<T, C>& A,
DenseMatrix<T, C>& Q, DenseMatrix<T, C>& R) {
const int m = A.rows;
const int n = A.cols;
std::vector<std::vector<double>> QR(m, std::vector<double>(n));
std::vector<double> Rdiag(m);
R.resize(n, n);
Q.resize(m, n);
bool nonSingular = true;
for (int cc = 0; cc < C; cc++) {
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
QR[i][j] = (double)A[i][j][cc];
}
}
for (int k = 0; k < n; k++) {
double nrm = 0;
for (int i = k; i < m; i++) {
nrm = pythag(nrm, QR[i][k]);
}
if (nrm != 0.0) {
if (QR[k][k] < 0) {
nrm = -nrm;
}
for (int i = k; i < m; i++) {
QR[i][k] /= nrm;
}
QR[k][k] += 1.0;
for (int j = k + 1; j < n; j++) {
double s = 0.0;
for (int i = k; i < m; i++) {
s += QR[i][k] * QR[i][j];
}
s = -s / QR[k][k];
for (int i = k; i < m; i++) {
QR[i][j] += s * QR[i][k];
}
}
}
Rdiag[k] = -nrm;
}
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
if (i < j) {
R[i][j][cc] = T(QR[i][j]);
}
else if (i == j) {
R[i][j][cc] = T(Rdiag[i]);
}
else {
R[i][j][cc] = T(0.0);
}
}
}
for (int j = 0; j < n; j++) {
if (Rdiag[j] == 0) {
nonSingular = false;
break;
}
}
for (int k = n - 1; k >= 0; k--) {
for (int i = 0; i < m; i++) {
Q[i][k][cc] = T(0.0);
}
Q[k][k][cc] = T(1.0);
for (int j = k; j < n; j++) {
if (QR[k][k] != 0) {
double s = 0.0;
for (int i = k; i < m; i++) {
s += QR[i][k] * Q[i][j][cc];
}
s = -s / QR[k][k];
for (int i = k; i < m; i++) {
Q[i][j][cc] += T(s * QR[i][k]);
}
}
}
}
}
return nonSingular;
}
template<class T, int C> Vector<T, C> SolveQR(const DenseMatrix<T, C>& A,
const Vector<T, C>& b) {
if (A.rows != (int)b.size()) {
throw std::runtime_error(
MakeString()
<< "Matrix row dimensions and vector length must agree. A=["
<< A.rows << "," << A.cols << "] b=[" << b.size()
<< "]");
}
if (A.rows != A.cols) {
DenseMatrix<T, C> At = A.transpose();
DenseMatrix<T, C> AtA = At * A;
Vector<T, C> Atb = At * b;
int n = AtA.cols;
Vector<T, C> x(A.cols);
DenseMatrix<T, C> Q, R;
bool nonSingular = QR(AtA, Q, R);
if (!nonSingular) {
throw std::runtime_error("Matrix is singular.");
}
// Compute Y = transpose(Q)*B
x = Q.transpose() * Atb;
// Solve R*X = Y;
for (int k = n - 1; k >= 0; k--) {
x[k] /= R[k][k];
for (int i = 0; i < k; i++) {
x[i] -= x[k] * R[i][k];
}
}
return x;
}
else {
int n = A.cols;
Vector<T, C> x(A.cols);
DenseMatrix<T, C> Q, R;
bool nonSingular = QR(A, Q, R);
if (!nonSingular) {
throw std::runtime_error("Matrix is singular.");
}
// Compute Y = transpose(Q)*B
x = Q.transpose() * b;
// Solve R*X = Y;
for (int k = n - 1; k >= 0; k--) {
x[k] /= R[k][k];
for (int i = 0; i < k; i++) {
x[i] -= x[k] * R[i][k];
}
}
return x;
}
}
enum class MatrixFactorization {
SVD, QR, LU
};
template<class C, class R> std::basic_ostream<C, R> & operator <<(
std::basic_ostream<C, R> & ss, const MatrixFactorization& type) {
switch (type) {
case MatrixFactorization::SVD:
return ss << "SVD";
case MatrixFactorization::QR:
return ss << "QR";
case MatrixFactorization::LU:
return ss << "LU";
}
return ss;
}
template<class T, int C> Vector<T, C> Solve(const DenseMatrix<T, C>& A,
const Vector<T, C>& b, MatrixFactorization factor =
MatrixFactorization::SVD) {
switch (factor) {
case MatrixFactorization::SVD:
return SolveSVD(A, b);
case MatrixFactorization::QR:
return SolveQR(A, b);
case MatrixFactorization::LU:
return SolveLU(A, b);
}
return Vector<T, C>();
}
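// Usage note for Solve(): a minimal sketch (hypothetical sizes; exception
// handling omitted). Since A below is non-square, every factorization path
// solves the normal equations, so this is a least-squares fit:
//
//   DenseMatrix<float, 1> A; A.resize(8, 3);
//   Vector<float, 1> b(8);
//   // ... fill A and b ...
//   Vector<float, 1> x = Solve(A, b, MatrixFactorization::QR);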
template<class T, int C> Vector<T, C> SolveRobust(const DenseMatrix<T, C>& A, const Vector<T, C>& b,
int p = 1, int iterations = 100, double errorTolerance = 1E-6f,
double zeroTolerance = 1E-16, MatrixFactorization factor = MatrixFactorization::SVD) {
int N = (int)b.size();
Vector<T, C> W(N);
W.set(vec<T, C>(T(1)));
Vector<T, C> X;
double lastError = std::numeric_limits<double>::max();
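	// Iteratively reweighted least squares: each pass solves a weighted
	// least-squares problem, then resets each weight to |residual|^-p, so
	// p = 1 approximates the L1 (least absolute deviations) solution.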
for (int iter = 0;iter < iterations;iter++) {
X = SolveQR(W*A, W*b);
Vector<T, C> R = b - A*X;
vec<double, C> err = lengthVecSqr(R);
double e = lengthL1(err) / N;
if (std::abs(e - lastError) < errorTolerance) {
break;
}
lastError = e;
#pragma omp parallel for
for (int n = 0;n < N;n++) {
vec<T, C> w;
vec<T, C> r = R[n];
for (int c = 0;c < C;c++) {
w[c] = std::pow(std::max(std::abs(r[c]), T(zeroTolerance)), -p);
}
W[n] = w;
}
}
return X;
}
template<class T, int C> Vector<T, C> SolveRansac(const DenseMatrix<T, C>& A, const Vector<T, C>& b,
int sampleSize, double inlierTolerance = 0.01f, int iterations = 100) {
int N = (int)b.size();
DenseMatrix<T, C> As;
Vector<T, C> bs;
std::vector<int> order(N);
std::random_device rd;
std::mt19937 g(rd());
for (int n = 0;n < N;n++) {
order[n] = n;
}
Vector<T, C> X;
if (N <= sampleSize) {
return SolveQR(A, b);
}
std::shuffle(order.begin(), order.end(), g);
Vector<T, C> W(N);
int bestInliner = 0;
Vector<T, C> BestX;
int offset = 0;
for (int iter = 0;iter < iterations;iter++) {
W.set(vec<T, C>(T(0.0)));
As.resize(sampleSize, A.cols);
bs.resize(sampleSize);
for (int i = 0;i < sampleSize;i++) {
int idx = order[(i + offset)%N];
As[i] = A[idx];
bs[i] = b[idx];
}
X = SolveQR(As, bs);
Vector<T, C> R = b - A*X;
int count = 0;
for (int n = 0;n < N;n++) {
if (lengthL1(R[n]) < inlierTolerance) {
count++;
}
}
if (count > bestInliner) {
BestX = X;
bestInliner = count;
}
if (count > std::max(N / 2, sampleSize))break;//Good enough set of inliers, break
offset += sampleSize;
if (offset >= N) {
std::shuffle(order.begin(), order.end(), g);
offset = 0;
}
}
if (bestInliner<A.cols) {
return SolveQR(A, b);
}
order.clear();
Vector<T, C> R = b - A*BestX;
for (int n = 0;n < N;n++) {
if (lengthL1(R[n]) < inlierTolerance) {
order.push_back(n);
}
}
if ((int)order.size() < sampleSize) {
return BestX;
}
As.resize((int)order.size(), A.cols);
bs.resize((int)order.size());
for (int i = 0;i < (int)order.size();i++) {
int idx = order[i];
As[i] = A[idx];
bs[i] = b[idx];
}
X = SolveQR(As, bs);
return X;
}
}
#endif
|
fractal.c | /* Name: fractal.c v0.0.1
* Date: 2021-09-24
* Intro: Render fractals.
 * Update: Functions fractalJuliaSet and fractalMandelbrotSet now generate
           the correct index range [0, clrUsed) instead of (0, clrUsed].
*/
#include "fractal.h"
#if _FRACTAL_H != 0x000000
#error fractal.h version not supported
#endif
void fractalBarnsleyFern(char* path, int32_t width, int32_t height, uint32_t clrUsed, PALETTE palette, LINE lineX, LINE lineY, uint32_t repeat, uint32_t branch, uint32_t choice, double affine[][6], double x, double y) {
int32_t i;
uint32_t absHeight, rowSize, pixelArraySize, offBits, size;
BMP bmp = BMPInit(width, height, 8, clrUsed, palette, &absHeight, &rowSize, &pixelArraySize, &offBits, &size);
lineReverse(lineX);
lineReverse(lineY);
#ifdef _FRACTAL_USE_MT19937
init_genrand(time(NULL));
#else
srand(time(NULL));
#endif
#ifdef _FRACTAL_USE_OMP
#pragma omp parallel for
#endif
for(i=0; i<branch; ++i) {
int32_t j, m, n, o;
double p=x, q=y, t;
for(j=0; j<repeat; ++j) {
m = lineSubstitute(lineY, q);
n = lineSubstitute(lineX, p);
if(m>=0 && m<width && n>=0 && n<absHeight) {
o = rowSize * m + n + offBits;
/* A hack that avoids an OpenMP critical section for efficiency: racing
   increments may overshoot by one, and the second test below clamps the
   count back into [0, clrUsed). */
if(bmp[o] < clrUsed - 1) {
++bmp[o];
}
if(bmp[o] == (uint32_t)clrUsed) {
--bmp[o];
}
}
#ifdef _FRACTAL_USE_MT19937
o = genrand_int32() % choice;
#else
o = rand() % choice;
#endif
t = affine[o][0] * p + affine[o][1] * q + affine[o][2];
q = affine[o][3] * p + affine[o][4] * q + affine[o][5];
p = t;
}
}
BMPsave(bmp, path);
}
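/* Example: the classic Barnsley fern uses choice = 4 with these affine rows
 * (row layout {a, b, e, c, d, f} for x' = a*x + b*y + e, y' = c*x + d*y + f,
 * matching the update above). Note the maps are drawn uniformly here, so the
 * traditional 1/85/7/7 percent weighting would have to be emulated by
 * duplicating rows:
 *
 *   double affine[4][6] = {
 *       { 0.00,  0.00, 0.0,  0.00, 0.16, 0.00},
 *       { 0.85,  0.04, 0.0, -0.04, 0.85, 1.60},
 *       { 0.20, -0.26, 0.0,  0.23, 0.22, 1.60},
 *       {-0.15,  0.28, 0.0,  0.26, 0.24, 0.44},
 *   };
 */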
void fractalJuliaSet(char* path, int32_t width, int32_t height, uint32_t clrUsed, PALETTE palette, LINE lineX, LINE lineY, double x, double y) {
int32_t i;
uint32_t absHeight, rowSize, pixelArraySize, offBits, size;
BMP bmp = BMPInit(width, height, 8, clrUsed, palette, &absHeight, &rowSize, &pixelArraySize, &offBits, &size);
#ifdef _FRACTAL_USE_OMP
#pragma omp parallel for
#endif
for(i=0; i<width; ++i) {
int32_t j, k;
for(j=0; j<height; ++j) {
double m = lineSubstitute(lineX, j);
double n = lineSubstitute(lineY, i);
double t;
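			/* Escape-time iteration of z <- z^2 + c with z = m + n*i and the
			   fixed constant c = x + y*i; k counts iterations until |z| >= 2. */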
for(k=0; k<clrUsed && m*m + n*n < 4.0; ++k) {
t = m*m - n*n + x;
n = 2*m*n + y;
m = t;
}
			bmp[rowSize * i + j + offBits] = k > 0 ? k - 1 : 0; /* guard k == 0 when the pixel escapes before the first iteration */
}
}
BMPsave(bmp, path);
}
void fractalLogisticMap(char* path, int32_t width, int32_t height, uint32_t clrUsed, PALETTE palette, LINE lineX, LINE lineY, uint32_t preRepeat, uint32_t repeat) {
int32_t i;
uint32_t absHeight, rowSize, pixelArraySize, offBits, size;
BMP bmp = BMPInit(width, height, 8, clrUsed, palette, &absHeight, &rowSize, &pixelArraySize, &offBits, &size);
lineReverse(lineX);
#ifdef _FRACTAL_USE_OMP
#pragma omp parallel for
#endif
for(i=0; i<absHeight; ++i) {
double x = lineSubstitute(lineY, i);
double y = 0.5;
int32_t j, k, o;
for(k=0; k<preRepeat; ++k) {
y *= (1 - y) * x;
}
for(k=0; k<repeat; ++k) {
y *= (1 - y) * x;
j = lineSubstitute(lineX, y);
if(0<=j && j<width) {
o = rowSize * i + j + offBits;
if(bmp[o] < clrUsed - 1) {
++bmp[o];
}
}
}
}
BMPsave(bmp, path);
}
void fractalMandelbrotSet(char* path, int32_t width, int32_t height, uint32_t clrUsed, PALETTE palette, LINE lineX, LINE lineY) {
int32_t i;
uint32_t absHeight, rowSize, pixelArraySize, offBits, size;
BMP bmp = BMPInit(width, height, 8, clrUsed, palette, &absHeight, &rowSize, &pixelArraySize, &offBits, &size);
#ifdef _FRACTAL_USE_OMP
#pragma omp parallel for
#endif
for(i=0; i<width; ++i) {
int32_t j, k;
for(j=0; j<height; ++j) {
double x = lineSubstitute(lineX, j);
double y = lineSubstitute(lineY, i);
double m = 0.0;
double n = 0.0;
double t;
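			/* Same escape-time loop, but c = x + y*i now varies per pixel and
			   z starts at 0, which is the Mandelbrot set recurrence. */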
for(k=0; k<clrUsed && m*m + n*n < 4.0; ++k) {
t = m*m - n*n + x;
n = 2*m*n + y;
m = t;
}
bmp[rowSize * i + j + offBits] = k - 1;
}
}
BMPsave(bmp, path);
}
|
omp_task_detach.c | <ompts:test>
<ompts:testdescription>Test which checks the detach clause of the omp task directive.</ompts:testdescription>
<ompts:ompversion>5.0</ompts:ompversion>
<ompts:directive>omp task detach,omp_fulfill_event</ompts:directive>
<ompts:dependences>omp task, omp atomic</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
<ompts:orphan:vars>
int result1 = 0;
int result2 = 0;
</ompts:orphan:vars>
int <ompts:testcode:functionname>omp_task_detach</ompts:testcode:functionname>(FILE * logFile){
result1 = 0;
result2 = 0;
#pragma omp parallel
{
#pragma omp master
{
<ompts:orphan>
omp_event_handle_t event;
#pragma omp task shared(result1) <ompts:check>detach(event)</ompts:check>
{
<ompts:check>omp_fulfill_event(event);</ompts:check>
result1 = 1;
}
</ompts:orphan>
#pragma omp taskwait
}
#pragma omp master
{
<ompts:crosscheck>int dummy = 0;</ompts:crosscheck>
omp_event_handle_t event;
#pragma omp task depend(out: result2) shared(result2) <ompts:check>detach(event)</ompts:check>
{
#pragma omp atomic
result2++;
}
#pragma omp task depend(out:result2) shared(result2) <ompts:crosscheck>depend(in:dummy)</ompts:crosscheck>
{
result2 *= 2;
}
#pragma omp task shared(result2) <ompts:crosscheck>depend(out:dummy)</ompts:crosscheck>
{
my_sleep (SLEEPTIME);
#pragma omp atomic
result2++;
<ompts:check>omp_fulfill_event(event);</ompts:check>
}
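		/* In the detach variant, the first task's body runs immediately but the
		   task only completes once omp_fulfill_event is called from the sleeping
		   task, so the doubling task (depend(out: result2)) observes both
		   increments: (1 + 1) * 2 == 4. */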
#pragma omp taskwait
}
}
printf("result1: %d\n", result1);
printf("result2: %d\n", result2);
return (result1 == 1) && (result2 == 4);
}
</ompts:testcode>
</ompts:test>
|