GB_binop__rminus_int8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__rminus_int8
// A.*B function (eWiseMult): GB_AemultB__rminus_int8
// A*D function (colscale): GB_AxD__rminus_int8
// D*A function (rowscale): GB_DxB__rminus_int8
// C+=B function (dense accum): GB_Cdense_accumB__rminus_int8
// C+=b function (dense accum): GB_Cdense_accumb__rminus_int8
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rminus_int8
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rminus_int8
// C=scalar+B GB_bind1st__rminus_int8
// C=scalar+B' GB_bind1st_tran__rminus_int8
// C=A+scalar GB_bind2nd__rminus_int8
// C=A'+scalar GB_bind2nd_tran__rminus_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (y - x) ;
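// Illustration only (not part of the generated kernel): GB_BINOP binds x to
// aij and y to bij, so for rminus the result is the reverse subtraction:
//      int8_t z ;
//      GB_BINOP (z, 3, 5) ;    // expands to z = (5 - 3) ; giving z == 2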
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_INT8 || GxB_NO_RMINUS_INT8)
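// For example, uncommenting the corresponding "#define GxB_NO_RMINUS" (or
// GxB_NO_INT8 / GxB_NO_RMINUS_INT8) line in GB_control.h, or defining the same
// macro on the compiler command line, makes every kernel in this file return
// GrB_NO_VALUE so that the generic worker is used instead.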
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__rminus_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__rminus_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__rminus_int8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__rminus_int8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__rminus_int8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__rminus_int8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__rminus_int8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__rminus_int8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__rminus_int8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t bij = Bx [p] ;
Cx [p] = (bij - x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__rminus_int8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
Cx [p] = (y - aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij - x) ; \
}
GrB_Info GB_bind1st_tran__rminus_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (y - aij) ; \
}
GrB_Info GB_bind2nd_tran__rminus_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
directive.h
#pragma omp for schedule (static, chunk)
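/* Note: `#pragma omp for` must appear, at its point of use, inside an
   enclosing `parallel` region, and `chunk` must be an integer variable or
   macro visible where this header is included. schedule(static, chunk) deals
   out fixed blocks of `chunk` iterations to the threads in round-robin
   order. */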
ffindex_apply.c
/*
* FFindex
* written by Andreas Hauser <andy@splashground.de>.
* Please add your name here if you distribute modified versions.
*
 * FFindex is provided under the Creative Commons license "Attribution-ShareAlike
 * 3.0", which basically captures the spirit of the GNU General Public License (GPL).
*
* See:
* http://creativecommons.org/licenses/by-sa/3.0/
*
* ffindex_apply
* apply a program to each FFindex entry
*/
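/*
 * Example invocation (illustrative; the file names are hypothetical):
 *   ffindex_apply data.ffdata data.ffindex wc -l
 * runs `wc -l` once per index entry, with the entry's data piped to its stdin.
 */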
#define _GNU_SOURCE 1
#define _LARGEFILE64_SOURCE 1
#define _FILE_OFFSET_BITS 64
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "ffindex.h"
#include "ffutil.h"
int main(int argn, char **argv)
{
if(argn < 4)
{
fprintf(stderr, "USAGE: %s DATA_FILENAME INDEX_FILENAME PROGRAM [PROGRAM_ARGS]*\n"
FFINDEX_COPYRIGHT,
argv[0]);
return -1;
}
char *data_filename = argv[1];
char *index_filename = argv[2];
char *program_name = argv[3];
char **program_argv = argv + 3;
FILE *data_file = fopen(data_filename, "r");
FILE *index_file = fopen(index_filename, "r");
if( data_file == NULL) { fferror_print(__FILE__, __LINE__, argv[0], data_filename); exit(EXIT_FAILURE); }
if(index_file == NULL) { fferror_print(__FILE__, __LINE__, argv[0], index_filename); exit(EXIT_FAILURE); }
size_t data_size;
char *data = ffindex_mmap_data(data_file, &data_size);
ffindex_index_t* index = ffindex_index_parse(index_file, 0);
if(index == NULL)
{
fferror_print(__FILE__, __LINE__, "ffindex_index_parse", index_filename);
exit(EXIT_FAILURE);
}
// Ignore SIGPIPE
struct sigaction handler;
handler.sa_handler = SIG_IGN;
sigemptyset(&handler.sa_mask);
handler.sa_flags = 0;
sigaction(SIGPIPE, &handler, NULL);
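// With SIGPIPE ignored, a write() to a pipe whose reader (the child) has
// already exited fails with EPIPE instead of killing this process; that
// EPIPE case is handled explicitly in the write loop below.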
size_t range_start = 0;
size_t range_end = index->n_entries;
// Foreach entry
//#pragma omp parallel for
for(size_t entry_index = range_start; entry_index < range_end; entry_index++)
{
//fprintf(stderr, "index %ld\n", entry_index);
int ret = 0;
ffindex_entry_t* entry = ffindex_get_entry_by_index(index, entry_index);
if(entry == NULL) { perror("ffindex_get_entry_by_index"); continue; } // entry is NULL here; must not dereference it
int pipefd[2];
ret = pipe(pipefd);
if(ret != 0) { perror(entry->name); continue; }
pid_t child_pid = fork();
if(child_pid == 0)
{
fclose(data_file);
fclose(index_file);
close(pipefd[1]);
// Make pipe from parent our new stdin
int newfd = dup2(pipefd[0], fileno(stdin));
if(newfd < 0) { fprintf(stdout, "%d %d\n", pipefd[0], newfd); perror(entry->name); }
close(pipefd[0]);
// exec program with the pipe as stdin
execvp(program_name, program_argv);
// never reached
}
else if(child_pid > 0)
{
// Read end is for child only
close(pipefd[0]);
// Write file data to child's stdin.
char *filedata = ffindex_get_data_by_entry(data, entry);
ssize_t written = 0;
while(written < entry->length)
{
ssize_t w = write(pipefd[1], filedata + written, entry->length - written);
if(w < 0 && errno != EPIPE) { perror(entry->name); break; }
else if(w == 0 && errno != 0) { perror(entry->name); break; }
else
written += w;
}
close(pipefd[1]); // child gets EOF
waitpid(child_pid, NULL, 0);
}
else
{
perror(entry->name);
exit(errno);
}
}
return 0;
}
/* vim: ts=2 sw=2 et
*/
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride,
ResScalar* res, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar,LhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor>
::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
}
};
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
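// For orientation (a summary of the code below, not Eigen documentation):
// the depth dimension is walked in blocks of kc; for each depth block a
// horizontal panel of the rhs is packed into blockB (sized for the L2 cache),
// each mc x kc block of the lhs is packed into blockA (sized for the L1
// cache), and the gebp micro-kernel accumulates C += A' * B' panel by panel.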
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsStride,
ResScalar* res, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar,RhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
//Index nc = blocking.nc(); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
Index tid = omp_get_thread_num();
Index threads = omp_get_num_threads();
std::size_t sizeA = kc*mc;
std::size_t sizeW = kc*Traits::WorkSpaceFactor;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);
RhsScalar* blockB = blocking.blockB();
eigen_internal_assert(blockB!=0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing A'.
pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);
// Pack B_k to B' in a parallel fashion:
// each thread packs the sub block B_k,j to B'_j where j is the thread id.
// However, before copying to B'_j, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
while(info[tid].users!=0) {}
info[tid].users += threads;
pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);
// Notify the other threads that the part B'_j is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per B'_j
for(Index shift=0; shift<threads; ++shift)
{
Index j = (tid+shift)%threads;
// At this point we have to make sure that B'_j has been updated by the thread j,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if(shift>0)
while(info[j].sync!=k) {}
gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
}
// Then keep going as usual with the remaining A'
for(Index i=mc; i<rows; i+=mc)
{
const Index actual_mc = (std::min)(i+mc,rows)-i;
// pack A_i,k to A'
pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);
// C_i += A' * B'
gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
}
// Release all the sub blocks B'_j of B' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index j=0; j<threads; ++j)
#pragma omp atomic
--(info[j].users);
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*cols;
std::size_t sizeW = kc*Traits::WorkSpaceFactor;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
// (==GEMM_VAR1)
for(Index k2=0; k2<depth; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack rhs's panel into a sequential chunk of memory (L2 caching)
// Note that this panel will be read as many times as the number of blocks in the lhs's
// vertical panel which is, in practice, a very low number.
pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);
// For each mc x kc block of the lhs's vertical panel...
// (==GEPP_VAR1)
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
// We pack the lhs's block into a sequential chunk of memory (L1 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro vertical panel of the large rhs's panel (e.g., cols/4 times).
pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);
// Everything is packed, we can now call the block * panel kernel:
gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
}
}
}
}
};
/*********************************************************************************
* Specialization of GeneralProduct<> for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
template<typename Lhs, typename Rhs>
struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
: traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};
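// For intuition only (not part of Eigen): this GemmProduct path is what a
// dynamically-sized product such as
//
//   MatrixXd A(m,k), B(k,n), C(m,n);
//   C.noalias() += alpha * A * B;
//
// ultimately evaluates through: gemm_functor (below) wraps the call to
// general_matrix_matrix_product::run() with the selected blocking.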
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, Scalar actualAlpha,
BlockingType& blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
{}
void initParallelSession() const
{
m_blocking.allocateB();
}
void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
{
if(cols==-1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
/*(const Scalar*)*/&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
/*(const Scalar*)*/&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
(Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
protected:
const Lhs& m_lhs;
const Rhs& m_rhs;
Dest& m_dest;
Scalar m_actualAlpha;
BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar* m_blockA;
RhsScalar* m_blockB;
RhsScalar* m_blockW;
DenseIndex m_mc;
DenseIndex m_nc;
DenseIndex m_kc;
public:
level3_blocking()
: m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
{}
inline DenseIndex mc() const { return m_mc; }
inline DenseIndex nc() const { return m_nc; }
inline DenseIndex kc() const { return m_kc; }
inline LhsScalar* blockA() { return m_blockA; }
inline RhsScalar* blockB() { return m_blockB; }
inline RhsScalar* blockW() { return m_blockW; }
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth,
SizeW = MaxDepth * Traits::WorkSpaceFactor
};
EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];
public:
gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/)
{
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
this->m_blockW = m_staticW;
}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateW() {}
inline void allocateAll() {}
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
DenseIndex m_sizeA;
DenseIndex m_sizeB;
DenseIndex m_sizeW;
public:
gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
m_sizeW = this->m_kc*Traits::WorkSpaceFactor;
}
void allocateA()
{
if(this->m_blockA==0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB()
{
if(this->m_blockB==0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateW()
{
if(this->m_blockW==0)
this->m_blockW = aligned_new<RhsScalar>(m_sizeW);
}
void allocateAll()
{
allocateA();
allocateB();
allocateW();
}
~gemm_blocking_space()
{
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
aligned_delete(this->m_blockW, m_sizeW);
}
};
} // end namespace internal
template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, GemmProduct>
: public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs>
{
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
};
public:
EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef Scalar ResScalar;
GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
{
typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp;
EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
}
template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
{
eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
* RhsBlasTraits::extractScalarFactor(m_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor<
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
_ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit);
}
};
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
omptarget.h
//===---- omptarget.h - OpenMP GPU initialization ---------------- CUDA -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declarations of all library macros, types,
// and functions.
//
//===----------------------------------------------------------------------===//
#ifndef OMPTARGET_H
#define OMPTARGET_H
#include "common/allocator.h"
#include "common/debug.h" // debug
#include "common/state-queue.h"
#include "common/support.h"
#include "interface.h" // interfaces with omp, compiler, and user
#include "target_impl.h"
#define OMPTARGET_NVPTX_VERSION 1.1
// used by the library for the interface with the app
#define DISPATCH_FINISHED 0
#define DISPATCH_NOTFINISHED 1
// used by dynamic scheduling
#define FINISHED 0
#define NOT_FINISHED 1
#define LAST_CHUNK 2
#define BARRIER_COUNTER 0
#define ORDERED_COUNTER 1
// arguments needed for L0 parallelism only.
class omptarget_nvptx_SharedArgs {
public:
// All these methods must be called by the master thread only.
INLINE void Init() {
args = buffer;
nArgs = MAX_SHARED_ARGS;
}
INLINE void DeInit() {
// Free any memory allocated for outlined parallel function with a large
// number of arguments.
if (nArgs > MAX_SHARED_ARGS) {
SafeFree(args, "new extended args");
Init();
}
}
INLINE void EnsureSize(size_t size) {
if (size > nArgs) {
if (nArgs > MAX_SHARED_ARGS) {
SafeFree(args, "new extended args");
}
args = (void **)SafeMalloc(size * sizeof(void *), "new extended args");
nArgs = size;
}
}
// Called by all threads.
INLINE void **GetArgs() const { return args; };
private:
// buffer of pre-allocated arguments.
void *buffer[MAX_SHARED_ARGS];
// pointer to arguments buffer.
// starts off as a pointer to 'buffer' but can be dynamically allocated.
void **args;
// starts off as MAX_SHARED_ARGS but can increase in size.
uint32_t nArgs;
};
extern DEVICE
omptarget_nvptx_SharedArgs EXTERN_SHARED(omptarget_nvptx_globalArgs);
// Worker slot type which is initialized with the default worker slot
// size of 4*32 bytes.
struct __kmpc_data_sharing_slot {
__kmpc_data_sharing_slot *Next;
__kmpc_data_sharing_slot *Prev;
void *PrevSlotStackPtr;
void *DataEnd;
char Data[DS_Worker_Warp_Slot_Size];
};
// Data structure to keep in shared memory that traces the current slot, stack,
// and frame pointer as well as the active threads that didn't exit the current
// environment.
struct DataSharingStateTy {
__kmpc_data_sharing_slot *SlotPtr[DS_Max_Warp_Number];
void *StackPtr[DS_Max_Warp_Number];
void *volatile FramePtr[DS_Max_Warp_Number];
__kmpc_impl_lanemask_t ActiveThreads[DS_Max_Warp_Number];
};
extern DEVICE DataSharingStateTy EXTERN_SHARED(DataSharingState);
////////////////////////////////////////////////////////////////////////////////
// task ICV and (implicit & explicit) task state
class omptarget_nvptx_TaskDescr {
public:
// methods for flags
INLINE omp_sched_t GetRuntimeSched() const;
INLINE void SetRuntimeSched(omp_sched_t sched);
INLINE int InParallelRegion() const { return items.flags & TaskDescr_InPar; }
INLINE int InL2OrHigherParallelRegion() const {
return items.flags & TaskDescr_InParL2P;
}
INLINE int IsParallelConstruct() const {
return items.flags & TaskDescr_IsParConstr;
}
INLINE int IsTaskConstruct() const { return !IsParallelConstruct(); }
// methods for other fields
INLINE uint16_t &ThreadId() { return items.threadId; }
INLINE uint64_t &RuntimeChunkSize() { return items.runtimeChunkSize; }
INLINE omptarget_nvptx_TaskDescr *GetPrevTaskDescr() const { return prev; }
INLINE void SetPrevTaskDescr(omptarget_nvptx_TaskDescr *taskDescr) {
prev = taskDescr;
}
// init & copy
INLINE void InitLevelZeroTaskDescr();
INLINE void InitLevelOneTaskDescr(omptarget_nvptx_TaskDescr *parentTaskDescr);
INLINE void Copy(omptarget_nvptx_TaskDescr *sourceTaskDescr);
INLINE void CopyData(omptarget_nvptx_TaskDescr *sourceTaskDescr);
INLINE void CopyParent(omptarget_nvptx_TaskDescr *parentTaskDescr);
INLINE void CopyForExplicitTask(omptarget_nvptx_TaskDescr *parentTaskDescr);
INLINE void CopyToWorkDescr(omptarget_nvptx_TaskDescr *masterTaskDescr);
INLINE void CopyFromWorkDescr(omptarget_nvptx_TaskDescr *workTaskDescr);
INLINE void CopyConvergentParent(omptarget_nvptx_TaskDescr *parentTaskDescr,
uint16_t tid, uint16_t tnum);
INLINE void SaveLoopData();
INLINE void RestoreLoopData() const;
private:
// bits for flags: (6 used, 2 free)
// 3 bits (SchedMask) for runtime schedule
// 1 bit (InPar) if this thread has encountered one or more parallel region
// 1 bit (IsParConstr) if ICV for a parallel region (false = explicit task)
// 1 bit (InParL2+) if this thread has encountered L2 or higher parallel
// region
static const uint8_t TaskDescr_SchedMask = (0x1 | 0x2 | 0x4);
static const uint8_t TaskDescr_InPar = 0x10;
static const uint8_t TaskDescr_IsParConstr = 0x20;
static const uint8_t TaskDescr_InParL2P = 0x40;
struct SavedLoopDescr_items {
int64_t loopUpperBound;
int64_t nextLowerBound;
int64_t chunk;
int64_t stride;
kmp_sched_t schedule;
} loopData;
struct TaskDescr_items {
uint8_t flags; // 6 bit used (see flag above)
uint8_t unused;
uint16_t threadId; // thread id
uint64_t runtimeChunkSize; // runtime chunk size
} items;
omptarget_nvptx_TaskDescr *prev;
};
// build on kmp
typedef struct omptarget_nvptx_ExplicitTaskDescr {
omptarget_nvptx_TaskDescr
taskDescr; // omptarget_nvptx task description (must be first)
kmp_TaskDescr kmpTaskDescr; // kmp task description (must be last)
} omptarget_nvptx_ExplicitTaskDescr;
////////////////////////////////////////////////////////////////////////////////
// Descriptor of a parallel region (worksharing in general)
class omptarget_nvptx_WorkDescr {
public:
// access to data
INLINE omptarget_nvptx_TaskDescr *WorkTaskDescr() { return &masterTaskICV; }
private:
omptarget_nvptx_TaskDescr masterTaskICV;
};
////////////////////////////////////////////////////////////////////////////////
class omptarget_nvptx_TeamDescr {
public:
// access to data
INLINE omptarget_nvptx_TaskDescr *LevelZeroTaskDescr() {
return &levelZeroTaskDescr;
}
INLINE omptarget_nvptx_WorkDescr &WorkDescr() {
return workDescrForActiveParallel;
}
// init
INLINE void InitTeamDescr();
INLINE __kmpc_data_sharing_slot *GetPreallocatedSlotAddr(int wid) {
worker_rootS[wid].DataEnd =
&worker_rootS[wid].Data[0] + DS_Worker_Warp_Slot_Size;
// We currently do not have a next slot.
worker_rootS[wid].Next = 0;
worker_rootS[wid].Prev = 0;
worker_rootS[wid].PrevSlotStackPtr = 0;
return (__kmpc_data_sharing_slot *)&worker_rootS[wid];
}
private:
omptarget_nvptx_TaskDescr
levelZeroTaskDescr; // icv for team master initial thread
omptarget_nvptx_WorkDescr
workDescrForActiveParallel; // one, ONLY for the active par
ALIGN(16)
__kmpc_data_sharing_slot worker_rootS[DS_Max_Warp_Number];
};
////////////////////////////////////////////////////////////////////////////////
// thread private data (struct of arrays for better coalescing)
// tid refers here to the global thread id
// multiple concurrent kernels are not supported at this time
class omptarget_nvptx_ThreadPrivateContext {
public:
// task
INLINE omptarget_nvptx_TaskDescr *Level1TaskDescr(int tid) {
return &levelOneTaskDescr[tid];
}
INLINE void SetTopLevelTaskDescr(int tid,
omptarget_nvptx_TaskDescr *taskICV) {
topTaskDescr[tid] = taskICV;
}
INLINE omptarget_nvptx_TaskDescr *GetTopLevelTaskDescr(int tid) const;
// parallel
INLINE uint16_t &NumThreadsForNextParallel(int tid) {
return nextRegion.tnum[tid];
}
// schedule (for dispatch)
INLINE kmp_sched_t &ScheduleType(int tid) { return schedule[tid]; }
INLINE int64_t &Chunk(int tid) { return chunk[tid]; }
INLINE int64_t &LoopUpperBound(int tid) { return loopUpperBound[tid]; }
INLINE int64_t &NextLowerBound(int tid) { return nextLowerBound[tid]; }
INLINE int64_t &Stride(int tid) { return stride[tid]; }
INLINE omptarget_nvptx_TeamDescr &TeamContext() { return teamContext; }
INLINE void InitThreadPrivateContext(int tid);
INLINE uint64_t &Cnt() { return cnt; }
private:
// team context for this team
omptarget_nvptx_TeamDescr teamContext;
// task ICV for implicit threads in the only parallel region
omptarget_nvptx_TaskDescr levelOneTaskDescr[MAX_THREADS_PER_TEAM];
// pointer where to find the current task ICV (top of the stack)
omptarget_nvptx_TaskDescr *topTaskDescr[MAX_THREADS_PER_TEAM];
union {
// Only one of the two is live at the same time.
// parallel
uint16_t tnum[MAX_THREADS_PER_TEAM];
} nextRegion;
// schedule (for dispatch)
kmp_sched_t schedule[MAX_THREADS_PER_TEAM]; // remember schedule type for #for
int64_t chunk[MAX_THREADS_PER_TEAM];
int64_t loopUpperBound[MAX_THREADS_PER_TEAM];
// state for dispatch with dyn/guided OR static (never use both at a time)
int64_t nextLowerBound[MAX_THREADS_PER_TEAM];
int64_t stride[MAX_THREADS_PER_TEAM];
uint64_t cnt;
};
/// Memory manager for statically allocated memory.
class omptarget_nvptx_SimpleMemoryManager {
private:
struct MemDataTy {
volatile unsigned keys[OMP_STATE_COUNT];
} MemData[MAX_SM] ALIGN(128);
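// NOTE: hash() below assumes OMP_STATE_COUNT is a power of two (mask = count - 1).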
INLINE static uint32_t hash(unsigned key) {
return key & (OMP_STATE_COUNT - 1);
}
public:
INLINE void Release();
INLINE const void *Acquire(const void *buf, size_t size);
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// global data tables
////////////////////////////////////////////////////////////////////////////////
extern DEVICE omptarget_nvptx_SimpleMemoryManager
omptarget_nvptx_simpleMemoryManager;
extern DEVICE uint32_t EXTERN_SHARED(usedMemIdx);
extern DEVICE uint32_t EXTERN_SHARED(usedSlotIdx);
#if _OPENMP
extern DEVICE uint8_t parallelLevel[MAX_THREADS_PER_TEAM / WARPSIZE];
#pragma omp allocate(parallelLevel) allocator(omp_pteam_mem_alloc)
#else
extern DEVICE
uint8_t EXTERN_SHARED(parallelLevel)[MAX_THREADS_PER_TEAM / WARPSIZE];
#endif
extern DEVICE uint16_t EXTERN_SHARED(threadLimit);
extern DEVICE uint16_t EXTERN_SHARED(threadsInTeam);
extern DEVICE uint16_t EXTERN_SHARED(nThreads);
extern DEVICE omptarget_nvptx_ThreadPrivateContext *
EXTERN_SHARED(omptarget_nvptx_threadPrivateContext);
extern DEVICE uint32_t EXTERN_SHARED(execution_param);
extern DEVICE void *EXTERN_SHARED(ReductionScratchpadPtr);
////////////////////////////////////////////////////////////////////////////////
// work function (outlined parallel/simd functions) and arguments.
// needed for L1 parallelism only.
////////////////////////////////////////////////////////////////////////////////
typedef void *omptarget_nvptx_WorkFn;
extern volatile DEVICE
omptarget_nvptx_WorkFn EXTERN_SHARED(omptarget_nvptx_workFn);
////////////////////////////////////////////////////////////////////////////////
// get private data structures
////////////////////////////////////////////////////////////////////////////////
INLINE omptarget_nvptx_TeamDescr &getMyTeamDescriptor();
INLINE omptarget_nvptx_WorkDescr &getMyWorkDescriptor();
INLINE omptarget_nvptx_TaskDescr *
getMyTopTaskDescriptor(bool isSPMDExecutionMode);
INLINE omptarget_nvptx_TaskDescr *getMyTopTaskDescriptor(int globalThreadId);
////////////////////////////////////////////////////////////////////////////////
// inlined implementation
////////////////////////////////////////////////////////////////////////////////
INLINE uint32_t __kmpc_impl_ffs(uint32_t x) { return __builtin_ffs(x); }
INLINE uint32_t __kmpc_impl_popc(uint32_t x) { return __builtin_popcount(x); }
INLINE uint32_t __kmpc_impl_ffs(uint64_t x) { return __builtin_ffsl(x); }
INLINE uint32_t __kmpc_impl_popc(uint64_t x) { return __builtin_popcountl(x); }
#include "common/omptargeti.h"
#endif
NLmean_propag1dir_sspacing6_tspacing4_sim12_acc12_neighbor5_tau0100.c
/*
* compile: gcc -O3 -std=c99 -o [filename_out] -fopenmp [filename].c -lm -I/usr/include/netcdf-3/ -L/usr/lib64/ -lnetcdf -lnetcdf_c++
* in the terminal: export OMP_NUM_THREADS=3
*/
#include<stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <netcdf.h>
#include <omp.h>
/* This is the name of the data file we will read. */
#define FILENAME_RD "/data/PhDworks/isotropic/NLM/Udiff_spacespacing6.nc"
#define FILENAME_WR "/data/PhDworks/isotropic/NLM/NLmean_propag1dir_sspacing6_tspacing4_sim12_acc12_neighbor5_tau0100.nc"
/* all constants */
#define N_HR 96
#define SCALE_FACTOR_SPACE 6
#define SCALE_FACTOR_TIME 4
#define SIM_HAFTSIZE 12
#define ACC_HAFTSIZE 12
#define NEIGHBOR_HAFTSIZE 5
#define SIM_FULLSIZE (2 * SIM_HAFTSIZE + 1)
#define ACC_FULLSIZE (2 * ACC_HAFTSIZE + 1)
#define NEIGHBOR_FULLSIZE (2 * NEIGHBOR_HAFTSIZE + 1)
#define TAU 0.1
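/* the "tau0100" suffix of FILENAME_WR presumably encodes this value, 0.100 */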
#define NUM_VARS 1
#define NUM_SCALES 2
#define NUM_3DSNAPS 37 /* #3D snapshots */
#define NUM_BLOCKS (N_HR/SCALE_FACTOR_TIME - 1) /* #(1:SCALE_FACTOR_TIME:N_HR) - 1 */
#define NUM_2DSNAPS (SCALE_FACTOR_TIME * NUM_BLOCKS + 1) /* #2D snapshots in each 3D block */
#define NDIMS 4
/* Handle errors by printing an error message and exiting with a non-zero status. */
#define ERRCODE 2
#define ERR(e) {printf("Error: %s\n", nc_strerror(e)); exit(ERRCODE);}
/* **********************************************************************************/
/* ****************************** USEFUL FUNCTIONS **********************************/
/* **********************************************************************************/
/*
 * get_onesnap: copy part of a big array (arr1) into a smaller one (arr2): arr2 = arr1[id_start:id_end]
*/
void get_onesnap(double *arr1,double *arr2, int id_start, int id_end)
{
for (int i = id_start; i < id_end + 1; i++)
arr2[i - id_start] = arr1[i];
}
/*
 * put_onesnap: copy a small array (arr2) back into a bigger one (arr1): arr1[id_start:id_end] = arr2
*/
void put_onesnap(double *arr1,double *arr2, int id_start, int id_end)
{
for (int i = id_start; i < id_end + 1; i++)
arr1[i] = arr2[i - id_start];
}
/*
* norm_by_weight: normalize x[dim] by weight W[dim]
*/
void norm_by_weight(int dim, double *x, double *W)
{
for (int k = 0; k < dim; k++)
x[k] = x[k]/W[k];
}
void add_mat(int dim, double *sum, double *x1, double *x2)
{
for (int k = 0; k < dim; k++)
sum[k] = x1[k] + x2[k];
}
void initialize(int dim, double *x, double val)
{
for (int k = 0; k < dim; k++)
x[k] = val;
}
/* **********************************************************************************/
/* ****************************** NETCDF UTILS **************************************/
/* **********************************************************************************/
/*
 * create_netcdf: create the netCDF file [filename] containing [num_vars] variables
 * whose names are given in [varname]
*/
void create_netcdf(char *filename, int num_vars, char *varname[num_vars])
{
int ncid_wr, retval_wr;
int vel_varid_wr;
int Nt, Nx, Ny, Nz;
int dimids[NDIMS];
/* Create the file. */
if ((retval_wr = nc_create(filename, NC_CLOBBER, &ncid_wr)))
ERR(retval_wr);
/* Define the dimensions. The record dimension is defined to have
* unlimited length - it can grow as needed.*/
if ((retval_wr = nc_def_dim(ncid_wr, "Ny", N_HR, &Ny)))
ERR(retval_wr);
if ((retval_wr = nc_def_dim(ncid_wr, "Nz", N_HR, &Nz)))
ERR(retval_wr);
if ((retval_wr = nc_def_dim(ncid_wr, "Nt", NC_UNLIMITED, &Nt)))
ERR(retval_wr);
/* Define the netCDF variables for the data. */
dimids[0] = Nt;
dimids[1] = Nx;
dimids[2] = Ny;
dimids[3] = Nz;
for (int i = 0; i<num_vars; i++)
{
if ((retval_wr = nc_def_var(ncid_wr, varname[i], NC_FLOAT, NDIMS, dimids, &vel_varid_wr)))
ERR(retval_wr);
}
/* End define mode (SHOULD NOT FORGET THIS!). */
if ((retval_wr = nc_enddef(ncid_wr)))
ERR(retval_wr);
/* Close the file. */
if ((retval_wr = nc_close(ncid_wr)))
ERR(retval_wr);
printf("\n *** SUCCESS creating file: %s!\n", filename);
}
/*
* write_netcdf:
* write into [filename], variable [varname] [snap_end - snap_start + 1 ] snapshots [snaps] started at [snap_start]
*/
void write_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps)
{
int ncid_wr, retval_wr;
int vel_varid_wr;
/* Open the file. NC_WRITE tells netCDF we want write access to the file.*/
if ((retval_wr = nc_open(filename, NC_WRITE, &ncid_wr)))
ERR(retval_wr);
/* Get variable*/
if ((retval_wr = nc_inq_varid(ncid_wr, varname, &vel_varid_wr)))
ERR(retval_wr);
/* Put variable*/
if ((retval_wr = nc_put_vara_double(ncid_wr, vel_varid_wr, start, count, &snaps[0])))
ERR(retval_wr);
/* Close the file. */
if ((retval_wr = nc_close(ncid_wr)))
ERR(retval_wr);
printf("\n *** SUCCESS writing variables \"%s\" to \"%s\"!\n", varname, filename);
}
/*
 * read_netcdf: read the variable [varname] of [filename] into [snaps],
 * over the hyperslab defined by [start] and [count]
*/
void read_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps)
{
int ncid_rd, retval_rd;
int vel_varid_rd;
/* ******** PREPARE TO READ ************* */
/* Open the file. NC_NOWRITE tells netCDF we want read-only access to the file.*/
if ((retval_rd = nc_open(filename, NC_NOWRITE, &ncid_rd)))
ERR(retval_rd);
/* Get the varids of the velocity in netCDF */
if ((retval_rd = nc_inq_varid(ncid_rd, varname, &vel_varid_rd)))
ERR(retval_rd);
if ((retval_rd = nc_get_vara_double(ncid_rd, vel_varid_rd, start, count, &snaps[0])))
ERR(retval_rd);
/* Close the file, freeing all resources. */
if ((retval_rd = nc_close(ncid_rd)))
ERR(retval_rd);
printf("\n *** SUCCESS reading variables \"%s\" from \"%s\" \n", varname, filename);
}
/* **********************************************************************************/
/* ****************************** ESTIMATE_DISTANCE *********************************/
/* **********************************************************************************/
/*
 * generate_grids: precompute the patch index tables used by NLmean
 * gridpatches_y(z): for every estimation point (i,j), every neighbour offset
 *     and every point of a (2*SIM_HAFTSIZE+1)^2 similarity patch, the
 *     periodically wrapped y (z) index of that point
 * acc_ids: indices, within a similarity patch, of the central
 *     (2*ACC_HAFTSIZE+1)^2 accumulation sub-patch
 */
void generate_grids(int *gridpatches_y, int *gridpatches_z, int * acc_ids)
{
int neighbor_id, sim_id;
int gridyoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], gridzoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE];
for (int m = 0; m < NEIGHBOR_FULLSIZE; m++)
{
for (int n = 0; n < NEIGHBOR_FULLSIZE; n++)
{
gridyoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = m - NEIGHBOR_HAFTSIZE;
gridzoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = n - NEIGHBOR_HAFTSIZE;
}
}
int gridyoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE], gridzoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE];
for (int p = 0; p < SIM_FULLSIZE; p++)
{
for (int q = 0; q < SIM_FULLSIZE; q++)
{
gridyoffset_sim[p * SIM_FULLSIZE + q] = p - SIM_HAFTSIZE;
gridzoffset_sim[p * SIM_FULLSIZE + q] = q - SIM_HAFTSIZE;
}
}
int grid_sim[SIM_FULLSIZE][SIM_FULLSIZE];
for (int p = 0; p < SIM_FULLSIZE; p++)
for (int q = 0; q < SIM_FULLSIZE; q++)
grid_sim[p][q] = p * SIM_FULLSIZE + q;
for (int p = 0; p < ACC_FULLSIZE; p++)
for (int q = 0; q < ACC_FULLSIZE; q++)
acc_ids[p * ACC_FULLSIZE + q] = grid_sim[SIM_HAFTSIZE - ACC_HAFTSIZE + p][SIM_HAFTSIZE - ACC_HAFTSIZE + q];
int valy, valz;
long int grid_id;
for (int i = 0; i < N_HR; i++)
{
for (int j = 0; j < N_HR; j++)
{
for (int neighbor_id = 0; neighbor_id < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; neighbor_id++)
{
for (int sim_id = 0; sim_id < SIM_FULLSIZE * SIM_FULLSIZE; sim_id++)
{
grid_id = i * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ j * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ neighbor_id * SIM_FULLSIZE * SIM_FULLSIZE + sim_id;
valy = i + gridyoffset_neighbor[neighbor_id] + gridyoffset_sim[sim_id];
valz = j + gridzoffset_neighbor[neighbor_id] + gridzoffset_sim[sim_id];
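/* Wrap out-of-range indices periodically with period N_HR - 1, i.e. the first and last grid points are apparently treated as the same physical point. */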
if (valy < 0)
gridpatches_y[grid_id] = (N_HR - 1) + valy;
else if (valy > (N_HR - 1))
gridpatches_y[grid_id] = valy - (N_HR - 1);
else
gridpatches_y[grid_id] = valy;
if (valz < 0)
gridpatches_z[grid_id] = (N_HR - 1) + valz;
else if (valz > (N_HR - 1))
gridpatches_z[grid_id] = valz - (N_HR - 1);
else
gridpatches_z[grid_id] = valz;
}
}
}
}
//printf("\n gridpatches_z: %i \n", gridpatches_y[0]);
}
/* **********************************************************************************/
/* ****************************** NLMEAN *********************************/
/* **********************************************************************************/
/*
 * NLmean: non-local means update of one high-frequency plane
 * x_NLM, weight_NLM: accumulated estimate and weights (updated in place)
 * x_ref: low-frequency reference plane (patch similarity is measured here)
 * x_moving: low-frequency moving plane, compared against x_ref
 * x_fusion: high-frequency plane whose patches are blended into x_NLM
 * gridy, gridz: precomputed patch index tables from generate_grids
 * accids: indices of the accumulation sub-patch within a similarity patch
 */
/*void fusion(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion,
int gridpatches_y[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE],
int gridpatches_z[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE],
int acc_ids[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], int est_idy, int est_idz)*/
void NLmean(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion, int *gridy, int *gridz, int *accids)
{
double norm_fact = 1.0/((double) (SIM_FULLSIZE * SIM_FULLSIZE));
int ri = NEIGHBOR_HAFTSIZE * NEIGHBOR_FULLSIZE + NEIGHBOR_HAFTSIZE;
int est_idy;
#pragma omp parallel for private (est_idy)
for (est_idy = 0; est_idy < N_HR; est_idy++)
for (int est_idz = 0; est_idz < N_HR; est_idz++)
for (int ni = 0; ni < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; ni++)
{
int ref_idy, ref_idz, moving_idy, moving_idz;
double du;
double d = 0.0;
long int grid_rid, grid_nid;
for (int si = 0; si < SIM_FULLSIZE * SIM_FULLSIZE; si++)
{
grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + si ;
grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + si;
ref_idy = gridy[grid_rid];
moving_idy = gridy[grid_nid];
ref_idz = gridz[grid_rid];
moving_idz = gridz[grid_nid];
//compute distance btw reference patch and fusion patch
du = x_ref[ref_idy * N_HR + ref_idz] - x_moving[moving_idy * N_HR + moving_idz];
d = d + norm_fact*du*du;
}
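// NL-means weight: w = exp(-d / (2*TAU^2)), where d is the mean squared
// difference between the reference and moving similarity patches.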
double w = exp(-d/(2.0*TAU*TAU));
for(int k = 0; k < ACC_FULLSIZE * ACC_FULLSIZE; k++)
{
int ai = accids[k];
grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + ai ;
grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + ai;
ref_idy = gridy[grid_rid];
moving_idy = gridy[grid_nid];
ref_idz = gridz[grid_rid];
moving_idz = gridz[grid_nid];
// Different est_idy iterations of the parallel loop can update the same
// (ref_idy, ref_idz) entry, so the accumulations must be atomic.
#pragma omp atomic
x_NLM[ref_idy * N_HR + ref_idz] += w*x_fusion[moving_idy * N_HR + moving_idz];
#pragma omp atomic
weight_NLM[ref_idy * N_HR + ref_idz] += w;
}
//printf("\n w=%f\n ",w);
}
}
void propag_forward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_bound1, int t_offset)
{
for (int t_est = t_first + 1; t_est <= t_bound1; t_est++)
{
int t_prev = t_est - 1;
double xref_lf[N_HR * N_HR], xref_hf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR], w[N_HR * N_HR];
get_onesnap(Xlf, xref_lf, t_offset + t_est * N_HR * N_HR, t_offset + (t_est + 1) * N_HR * N_HR - 1);
get_onesnap(Xlf, xmov_lf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1);
//Initialize with zeros
initialize(N_HR * N_HR, xref_hf, 0.0);
initialize(N_HR * N_HR, w, 0.0);
// Propagation from previous planes
NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
// Normalize and put back
norm_by_weight(N_HR*N_HR, xref_hf, w);
put_onesnap(Xrec, xref_hf, t_offset + t_est * N_HR * N_HR, t_offset + (t_est + 1) * N_HR * N_HR - 1);
}
}
void propag_backward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_last, int t_bound2, int t_offset)
{
for (int t_est = t_last - 1; t_est >= t_bound2; --t_est)
{
int t_prev = t_est + 1;
double xref_lf[N_HR * N_HR], xref_hf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR], w[N_HR * N_HR];
get_onesnap(Xlf, xref_lf, t_offset + t_est * N_HR * N_HR, t_offset + (t_est + 1) * N_HR * N_HR - 1);
get_onesnap(Xlf, xmov_lf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1);
//Initialize with zeros
initialize(N_HR * N_HR, xref_hf, 0.0);
initialize(N_HR * N_HR, w, 0.0);
// Propagation from previous planes
NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
// Normalize and put back
norm_by_weight(N_HR*N_HR, xref_hf, w);
put_onesnap(Xrec, xref_hf, t_offset + t_est * N_HR * N_HR, t_offset + (t_est + 1) * N_HR * N_HR - 1);
}
}
void propag_2planes(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_mid, int t_offset)
{
double xref_lf[N_HR * N_HR], xref_hf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR], w[N_HR * N_HR];
int t_prev = t_mid - 1;
int t_after = t_mid + 1;
//Initialize with zeros
initialize(N_HR * N_HR, xref_hf, 0.0);
initialize(N_HR * N_HR, w, 0.0);
get_onesnap(Xlf, xref_lf, t_offset + t_mid * N_HR * N_HR, t_offset + (t_mid + 1) * N_HR * N_HR - 1);
get_onesnap(Xlf, xmov_lf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1);
NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
get_onesnap(Xlf, xmov_lf, t_offset + t_after * N_HR * N_HR, t_offset + (t_after + 1) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + t_after * N_HR * N_HR, t_offset + (t_after + 1) * N_HR * N_HR - 1);
NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
// Normalize and put back
norm_by_weight(N_HR*N_HR, xref_hf, w);
put_onesnap(Xrec, xref_hf, t_offset + t_mid * N_HR * N_HR, t_offset + (t_mid + 1) * N_HR * N_HR - 1);
}
void propag_towardcenter(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_offset)
{
double xref1_lf[N_HR * N_HR], xref2_lf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR];
double xref1_hf[N_HR * N_HR], w1[N_HR * N_HR], xref2_hf[N_HR * N_HR], w2[N_HR * N_HR];
int tc = (int)SCALE_FACTOR_TIME/2;
if (SCALE_FACTOR_TIME % 2) { tc = (int)SCALE_FACTOR_TIME/2 + 1; }
for (int td = 1; td < tc; td++)
{
int t1 = t_first + td; // bound on left side
int t2 = t_first + SCALE_FACTOR_TIME - td; // bound on right side
// Initialize with zeros
initialize(N_HR * N_HR, xref1_hf, 0.0);
initialize(N_HR * N_HR, w1, 0.0);
initialize(N_HR * N_HR, xref2_hf, 0.0);
initialize(N_HR * N_HR, w2, 0.0);
get_onesnap(Xlf, xref1_lf, t_offset + t1 * N_HR * N_HR, t_offset + (t1 + 1) * N_HR * N_HR - 1);
get_onesnap(Xlf, xref2_lf, t_offset + t2 * N_HR * N_HR, t_offset + (t2 + 1) * N_HR * N_HR - 1);
//Propagate from left bound
get_onesnap(Xlf, xmov_lf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1);
NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
//Propagate from right bound
get_onesnap(Xlf, xmov_lf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1);
NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
// Normalize and put back
norm_by_weight(N_HR*N_HR, xref1_hf, w1);
put_onesnap(Xrec, xref1_hf, t_offset + t1 * N_HR * N_HR, t_offset + (t1 + 1) * N_HR * N_HR - 1);
norm_by_weight(N_HR*N_HR, xref2_hf, w2);
put_onesnap(Xrec, xref2_hf, t_offset + t2 * N_HR * N_HR, t_offset + (t2 + 1) * N_HR * N_HR - 1);
}
// Last plane in the center
if (SCALE_FACTOR_TIME % 2 == 0)
{
initialize(N_HR * N_HR, xref1_hf, 0.0);
initialize(N_HR * N_HR, w1, 0.0);
get_onesnap(Xlf, xref1_lf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1);
get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1);
NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1);
NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
norm_by_weight(N_HR*N_HR, xref1_hf, w1);
put_onesnap(Xrec, xref1_hf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1);
}
}
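/* Illustration (not executed, assuming SCALE_FACTOR_TIME = 4 and t_first = 0):
main() below pins the measured planes 0 and 4, propag_forward fills plane 1
from plane 0, propag_backward fills plane 3 from plane 4, and propag_2planes
blends the central plane 2 from both reconstructed neighbors. The routine
propag_towardcenter above offers an alternative schedule that advances from
both block bounds at once. */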
/* **********************************************************************************/
/* ********************************** MAIN FUNCTION *********************************/
/* **********************************************************************************/
int main()
{
/* Create the file to save results */
char *varnames[NUM_VARS] = {"x_rec_all"};
create_netcdf(FILENAME_WR, NUM_VARS, varnames);
/* Allocate memory */
double *x_fusion_lf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
double *x_fusion_hf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
double *x_rec_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
/* read all snapshots */
size_t start_ids[4] = {0, 0, 0, 0};
size_t count_ids[4] = {NUM_3DSNAPS, NUM_2DSNAPS, N_HR, N_HR };
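// Layout implied by count_ids: dimensions are (3D snapshot, 2D time snapshot,
// y, z), flattened so each y-z plane occupies N_HR * N_HR consecutive doubles.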
read_netcdf(FILENAME_RD, "Uinterp_all", start_ids, count_ids, x_fusion_lf_all);
read_netcdf(FILENAME_RD, "Udiff_all", start_ids, count_ids, x_fusion_hf_all);
double time_all_start = omp_get_wtime();
double *x_current_lf = (double*)malloc(N_HR * N_HR * sizeof(double));
double *x_current_hf = (double*)malloc(N_HR * N_HR * sizeof(double));
double *x_rec = (double*)malloc(N_HR * N_HR * sizeof(double));
long int grid_size = N_HR * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE;
int *gridpatches_y = (int*)malloc(grid_size * sizeof(int));
int *gridpatches_z = (int*)malloc(grid_size * sizeof(int));
int *acc_ids = (int*)malloc(ACC_FULLSIZE * ACC_FULLSIZE * sizeof(int));
generate_grids(gridpatches_y, gridpatches_z, acc_ids);
for(int snap3d_id = 0; snap3d_id < NUM_3DSNAPS; snap3d_id++)
{
int t_offset = snap3d_id * NUM_2DSNAPS * N_HR*N_HR;
// put first PIV
get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1);
put_onesnap(x_rec_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1);
for (int block_id = 0; block_id < NUM_BLOCKS; block_id++)
{
double time_start = omp_get_wtime();
int t_first = SCALE_FACTOR_TIME*block_id;
int t_last = SCALE_FACTOR_TIME*(block_id+1);
// Put last PIV of the block
get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1);
put_onesnap(x_rec_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1);
if (SCALE_FACTOR_TIME % 2)
{
int t_bound1 = t_first + (int)SCALE_FACTOR_TIME/2;
int t_bound2 = t_bound1 + 1;
propag_forward(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_first, t_bound1, t_offset);
propag_backward(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_last, t_bound2, t_offset);
}
else
{
int t_mid = t_first + (int)SCALE_FACTOR_TIME/2;
int t_bound1 = t_mid - 1;
int t_bound2 = t_mid + 1;
propag_forward(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_first, t_bound1, t_offset);
propag_backward(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_last, t_bound2, t_offset);
propag_2planes(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_mid, t_offset);
}
// Report timing for every block; totals come from the compile-time constants
printf("\n Estimated block %i (total %i) in 3D snapshot %i (total %i) in %f seconds \n", block_id, NUM_BLOCKS, snap3d_id, NUM_3DSNAPS, (double)omp_get_wtime() - time_start);
}
}
// Write to file
write_netcdf(FILENAME_WR, "x_rec_all", start_ids, count_ids, x_rec_all);
/* free memory */
free(x_rec); free(x_current_lf); free(x_current_hf);
free(x_rec_all); free(x_fusion_lf_all); free(x_fusion_hf_all);
free(gridpatches_y); free(gridpatches_z); free(acc_ids);
printf("\n FINISH ALL COMPUTATION IN %f SECONDS \n", (double)omp_get_wtime() - time_all_start);
return 0;
}
|
multiway_merge.h | /***************************************************************************
* include/stxxl/bits/parallel/multiway_merge.h
*
* Implementation of sequential and parallel multiway merge.
* Extracted from MCSTL - http://algo2.iti.uni-karlsruhe.de/singler/mcstl/
*
* Part of the STXXL. See http://stxxl.sourceforge.net
*
* Copyright (C) 2007 Johannes Singler <singler@ira.uka.de>
* Copyright (C) 2014 Timo Bingmann <tb@panthema.net>
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
**************************************************************************/
#ifndef STXXL_PARALLEL_MULTIWAY_MERGE_HEADER
#define STXXL_PARALLEL_MULTIWAY_MERGE_HEADER
#include <vector>
#include <iterator>
#include <algorithm>
#include <stxxl/bits/verbose.h>
#include <stxxl/bits/common/is_sorted.h>
#include <stxxl/bits/common/utils.h>
#include <stxxl/bits/parallel/merge.h>
#include <stxxl/bits/parallel/losertree.h>
#include <stxxl/bits/parallel/settings.h>
#include <stxxl/bits/parallel/equally_split.h>
#include <stxxl/bits/parallel/multiseq_selection.h>
#include <stxxl/bits/parallel/timing.h>
#include <stxxl/bits/parallel/tags.h>
STXXL_BEGIN_NAMESPACE
namespace parallel {
//! Length of a sequence described by a pair of iterators.
template <typename RandomAccessIteratorPair>
typename std::iterator_traits<
typename RandomAccessIteratorPair::first_type
>::difference_type
iterpair_size(const RandomAccessIteratorPair& p)
{
return p.second - p.first;
}
/*!
* Iterator wrapper supporting an implicit supremum at the end of the sequence,
* dominating all comparisons. Deriving from RandomAccessIterator is not
* possible since RandomAccessIterator need not be a class.
*/
template <typename RandomAccessIterator, typename Comparator>
class guarded_iterator
{
public:
//! Our own type
typedef guarded_iterator<RandomAccessIterator, Comparator> self_type;
//! Value type of the iterator
typedef typename std::iterator_traits<RandomAccessIterator>::value_type value_type;
protected:
//! Current iterator position.
RandomAccessIterator current;
//! End iterator of the sequence.
RandomAccessIterator end;
//! Comparator.
Comparator& comp;
public:
/*!
* Constructor. Sets iterator to beginning of sequence.
* \param begin Begin iterator of sequence.
* \param end End iterator of sequence.
* \param comp Comparator provided for associated overloaded compare
* operators.
*/
guarded_iterator(RandomAccessIterator begin, RandomAccessIterator end,
Comparator& comp)
: current(begin), end(end), comp(comp)
{ }
/*!
* Pre-increment operator.
* \return This.
*/
self_type& operator ++ ()
{
++current;
return *this;
}
/*!
* Dereference operator.
* \return Referenced element.
*/
value_type& operator * ()
{
return *current;
}
/*!
* Convert to wrapped iterator.
* \return Wrapped iterator.
*/
RandomAccessIterator & iterator()
{
return current;
}
/*!
* Compare two elements referenced by guarded iterators.
* \param bi1 First iterator.
* \param bi2 Second iterator.
* \return \c True if less.
*/
friend bool operator < (self_type& bi1, self_type& bi2)
{
if (bi1.current == bi1.end) // bi1 is sup
return bi2.current == bi2.end; // true iff bi2 is also sup
if (bi2.current == bi2.end) // bi2 is sup
return true;
return bi1.comp(*bi1, *bi2); // normal compare
}
/*!
* Compare two elements referenced by guarded iterators.
* \param bi1 First iterator.
* \param bi2 Second iterator.
* \return \c True if less equal.
*/
friend bool operator <= (self_type& bi1, self_type& bi2)
{
if (bi2.current == bi2.end) // bi2 is sup
return bi1.current != bi1.end; // bi1 <= sup iff bi1 is not sup
if (bi1.current == bi1.end) // bi1 is sup
return false; // sup is never <= a regular element
return !bi1.comp(*bi2, *bi1); //normal compare
}
};
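/* Usage sketch (illustrative only): once a guarded_iterator reaches its end it
* compares as a supremum, so an exhausted sequence never wins a merge step.
* Given sorted arrays a = { 1 } and b = { 2, 3 }:
*
* std::less<int> less;
* guarded_iterator<int*, std::less<int> > ga(a, a + 1, less), gb(b, b + 2, less);
* // ga < gb holds (1 < 2); after ++ga, ga is sup and gb < ga holds.
*/
/*!
* Iterator wrapper without the implicit supremum: comparisons are always
* forwarded to the comparator, so it is valid only while no sequence runs out
* of elements during the merge.
*/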
template <typename RandomAccessIterator, typename Comparator>
class unguarded_iterator
{
public:
//! Our own type
typedef unguarded_iterator<RandomAccessIterator, Comparator> self_type;
//! Value type of the iterator
typedef typename std::iterator_traits<RandomAccessIterator>::value_type value_type;
protected:
//! Current iterator position.
RandomAccessIterator current;
//! Comparator.
Comparator& comp;
public:
/*!
* Constructor. Sets iterator to beginning of sequence.
* \param begin Begin iterator of sequence.
* \param end Unused, only for compatibility.
* \param comp Comparator provided for associated overloaded compare
* operators.
*/
unguarded_iterator(RandomAccessIterator begin,
RandomAccessIterator /* end */,
Comparator& comp)
: current(begin), comp(comp)
{ }
/*!
* Pre-increment operator.
* \return This.
*/
self_type& operator ++ ()
{
++current;
return *this;
}
/*!
* Dereference operator.
* \return Referenced element.
*/
value_type& operator * ()
{
return *current;
}
/*!
* Convert to wrapped iterator.
* \return Wrapped iterator.
*/
RandomAccessIterator & iterator()
{
return current;
}
/*!
* Compare two elements referenced by unguarded iterators.
* \param bi1 First iterator.
* \param bi2 Second iterator.
* \return \c True if less.
*/
friend bool operator < (self_type& bi1, self_type& bi2)
{
return bi1.comp(*bi1, *bi2); // normal compare, unguarded
}
/*!
* Compare two elements referenced by unguarded iterators.
* \param bi1 First iterator.
* \param bi2 Second iterator.
* \return \c True if less equal.
*/
friend bool operator <= (self_type& bi1, self_type& bi2)
{
return !bi1.comp(*bi2, *bi1); // normal compare, unguarded
}
};
/*!
* Prepare a set of sequences to be merged without an (end) guard.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param comp Comparator.
* \param min_sequence Output: index of the sequence whose last element is the
* smallest, or of the first empty sequence found.
* \tparam Stable Stable merging incurs a performance penalty.
* \return Number of elements that must be merged with guards afterwards, or
* -1 if an empty sequence was found.
* \pre (seqs_end - seqs_begin > 0)
*/
template <bool Stable, typename RandomAccessIteratorIterator, typename Comparator>
typename std::iterator_traits<
typename std::iterator_traits<RandomAccessIteratorIterator>::value_type::first_type
>::difference_type
prepare_unguarded(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
Comparator comp,
int& min_sequence)
{
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
typedef typename std::iterator_traits<RandomAccessIterator>
::difference_type diff_type;
if ((*seqs_begin).first == (*seqs_begin).second)
{
// empty sequence found, it's the first one
min_sequence = 0;
return -1;
}
// last element in sequence
value_type min = *((*seqs_begin).second - 1);
min_sequence = 0;
for (RandomAccessIteratorIterator s = seqs_begin + 1; s != seqs_end; ++s)
{
if ((*s).first == (*s).second)
{
// empty sequence found
min_sequence = static_cast<int>(s - seqs_begin);
return -1;
}
const value_type& v = *((*s).second - 1);
if (comp(v, min))
{
// last element in sequence is strictly smaller
min = v;
min_sequence = static_cast<int>(s - seqs_begin);
}
}
diff_type overhang_size = 0;
int s = 0;
for (s = 0; s <= min_sequence; ++s)
{
RandomAccessIterator split;
if (Stable)
split = std::upper_bound(seqs_begin[s].first, seqs_begin[s].second,
min, comp);
else
split = std::lower_bound(seqs_begin[s].first, seqs_begin[s].second,
min, comp);
overhang_size += seqs_begin[s].second - split;
}
for ( ; s < (seqs_end - seqs_begin); ++s)
{
RandomAccessIterator split =
std::lower_bound(seqs_begin[s].first, seqs_begin[s].second,
min, comp);
overhang_size += seqs_begin[s].second - split;
}
return overhang_size; // so many elements will be left over afterwards
}
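/* Worked example (illustrative only): for sequences {1,3,5}, {2,4,6} and
* {0,7,8} the smallest last element is 5, found in sequence 0, so
* min_sequence = 0. Every element not exceeding 5 -- all of {1,3,5}, then
* {2,4}, then {0} -- can be merged without guards; the leftover 6, 7 and 8
* form the overhang, so the function returns 3.
*/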
/*!
* Prepare a set of sequences to be merged with an (end) guard (sentinel).
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param comp Comparator.
* \return Number of elements that remain beyond the split positions.
* \pre Each sequence must own a writable slot one past its end, into which
* the sentinel value is stored.
*/
template <typename RandomAccessIteratorIterator, typename Comparator>
typename std::iterator_traits<
typename std::iterator_traits<RandomAccessIteratorIterator>::value_type::first_type
>::difference_type
prepare_unguarded_sentinel(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
Comparator comp)
{
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
typedef typename std::iterator_traits<RandomAccessIterator>
::difference_type diff_type;
value_type* max_value = NULL; // pointer to the largest last element over all sequences
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
{
if ((*s).first == (*s).second)
continue;
value_type& v = *((*s).second - 1); //last element in sequence
if (!max_value || comp(*max_value, v)) //strictly greater
max_value = &v;
}
diff_type overhang_size = 0;
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
{
RandomAccessIterator split = std::lower_bound((*s).first, (*s).second, *max_value, comp);
overhang_size += (*s).second - split;
*((*s).second) = *max_value; //set sentinel
}
return overhang_size; // so many elements will be left over afterwards
}
/*!
* Highly efficient 3-way merging procedure.
*
* Merging is done with the algorithm implementation described by Peter
* Sanders. Basically, the idea is to minimize the number of necessary
* comparisons after merging an element. The implementation trick that makes
* this fast is that the order of the sequences is stored in the instruction
* pointer (translated into goto labels in C++).
*
* This works well for merging up to 4 sequences.
*
* Note that making the merging stable does \a not come at a performance hit.
*
* Whether the merging is done guarded or unguarded is selected by the used
* iterator class.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \return End iterator of output sequence.
*/
template <template <typename RAI, typename C> class Iterator,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_3_variant(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
STXXL_ASSERT(seqs_end - seqs_begin == 3);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
if (length == 0)
return target;
#if STXXL_DEBUG_ASSERTIONS
ssize_t orig_length = length;
#endif
Iterator<RandomAccessIterator, Comparator>
seq0(seqs_begin[0].first, seqs_begin[0].second, comp),
seq1(seqs_begin[1].first, seqs_begin[1].second, comp),
seq2(seqs_begin[2].first, seqs_begin[2].second, comp);
if (seq0 <= seq1)
{
if (seq1 <= seq2)
goto s012;
else if (seq2 < seq0)
goto s201;
else
goto s021;
}
else
{
if (seq1 <= seq2)
{
if (seq0 <= seq2)
goto s102;
else
goto s120;
}
else
goto s210;
}
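// Each label s<a><b><c> encodes the invariant *seq<a> <= *seq<b> <= *seq<c>,
// so seq<a> always holds the next output element. After consuming it, at most
// two comparisons re-establish the ordering; the mix of <= and < operators in
// the cases below breaks ties toward the lower sequence index, keeping the
// merge stable.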
#define STXXL_MERGE3CASE(a, b, c, c0, c1) \
s ## a ## b ## c : \
*target = *seq ## a; \
++target; \
--length; \
++seq ## a; \
if (length == 0) goto finish; \
if (seq ## a c0 seq ## b) goto s ## a ## b ## c; \
if (seq ## a c1 seq ## c) goto s ## b ## a ## c; \
goto s ## b ## c ## a;
STXXL_MERGE3CASE(0, 1, 2, <=, <=);
STXXL_MERGE3CASE(1, 2, 0, <=, <);
STXXL_MERGE3CASE(2, 0, 1, <, <);
STXXL_MERGE3CASE(1, 0, 2, <, <=);
STXXL_MERGE3CASE(0, 2, 1, <=, <=);
STXXL_MERGE3CASE(2, 1, 0, <, <);
#undef STXXL_MERGE3CASE
finish:
;
#if STXXL_DEBUG_ASSERTIONS
STXXL_CHECK_EQUAL((seq0.iterator() - seqs_begin[0].first) +
(seq1.iterator() - seqs_begin[1].first) +
(seq2.iterator() - seqs_begin[2].first),
orig_length);
#endif
seqs_begin[0].first = seq0.iterator();
seqs_begin[1].first = seq1.iterator();
seqs_begin[2].first = seq2.iterator();
return target;
}
template <typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_3_combined(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
STXXL_ASSERT(seqs_end - seqs_begin == 3);
int min_seq;
RandomAccessIterator3 target_end;
DiffType overhang = prepare_unguarded<true>(seqs_begin, seqs_end, comp, min_seq);
DiffType total_length = 0;
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
total_length += iterpair_size(*s);
if (overhang != (DiffType)(-1))
{
DiffType unguarded_length = std::min(length, total_length - overhang);
target_end = multiway_merge_3_variant<unguarded_iterator>
(seqs_begin, seqs_end, target, unguarded_length, comp);
overhang = length - unguarded_length;
}
else
{
// empty sequence found
overhang = length;
target_end = target;
}
STXXL_DEBUG_ASSERT(target_end == target + length - overhang);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
switch (min_seq)
{
case 0:
// iterators will be advanced accordingly
target_end = merge_advance(
seqs_begin[1].first, seqs_begin[1].second,
seqs_begin[2].first, seqs_begin[2].second,
target_end, overhang, comp);
break;
case 1:
target_end = merge_advance(
seqs_begin[0].first, seqs_begin[0].second,
seqs_begin[2].first, seqs_begin[2].second,
target_end, overhang, comp);
break;
case 2:
target_end = merge_advance(
seqs_begin[0].first, seqs_begin[0].second,
seqs_begin[1].first, seqs_begin[1].second,
target_end, overhang, comp);
break;
default:
assert(false);
}
STXXL_DEBUG_ASSERT(target_end == target + length);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
return target_end;
}
/*!
* Highly efficient 4-way merging procedure.
*
* Merging is done with the algorithm implementation described by Peter
* Sanders. Basically, the idea is to minimize the number of necessary
* comparisons after merging an element. The implementation trick that makes
* this fast is that the order of the sequences is stored in the instruction
* pointer (translated into goto labels in C++).
*
* This works well for merging up to 4 sequences.
*
* Note that making the merging stable does \a not come at a performance hit.
*
* Whether the merging is done guarded or unguarded is selected by the used
* iterator class.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \return End iterator of output sequence.
*/
template <template <typename RAI, typename C> class Iterator,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_4_variant(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
STXXL_ASSERT(seqs_end - seqs_begin == 4);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
if (length == 0)
return target;
#if STXXL_DEBUG_ASSERTIONS
ssize_t orig_length = length;
#endif
Iterator<RandomAccessIterator, Comparator>
seq0(seqs_begin[0].first, seqs_begin[0].second, comp),
seq1(seqs_begin[1].first, seqs_begin[1].second, comp),
seq2(seqs_begin[2].first, seqs_begin[2].second, comp),
seq3(seqs_begin[3].first, seqs_begin[3].second, comp);
#define STXXL_DECISION(a, b, c, d) do { \
if (seq ## d < seq ## a) goto s ## d ## a ## b ## c; \
if (seq ## d < seq ## b) goto s ## a ## d ## b ## c; \
if (seq ## d < seq ## c) goto s ## a ## b ## d ## c; \
goto s ## a ## b ## c ## d; \
} \
while (0)
if (seq0 <= seq1)
{
if (seq1 <= seq2)
STXXL_DECISION(0, 1, 2, 3);
else if (seq2 < seq0)
STXXL_DECISION(2, 0, 1, 3);
else
STXXL_DECISION(0, 2, 1, 3);
}
else
{
if (seq1 <= seq2)
{
if (seq0 <= seq2)
STXXL_DECISION(1, 0, 2, 3);
else
STXXL_DECISION(1, 2, 0, 3);
}
else
STXXL_DECISION(2, 1, 0, 3);
}
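// As in the 3-way variant, label s<a><b><c><d> encodes
// *seq<a> <= *seq<b> <= *seq<c> <= *seq<d>, so at most three comparisons are
// needed to re-sort the sequences after consuming the head of seq<a>.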
#define STXXL_MERGE4CASE(a, b, c, d, c0, c1, c2) \
s ## a ## b ## c ## d : \
if (length == 0) goto finish; \
*target = *seq ## a; \
++target; \
--length; \
++seq ## a; \
if (seq ## a c0 seq ## b) goto s ## a ## b ## c ## d; \
if (seq ## a c1 seq ## c) goto s ## b ## a ## c ## d; \
if (seq ## a c2 seq ## d) goto s ## b ## c ## a ## d; \
goto s ## b ## c ## d ## a;
STXXL_MERGE4CASE(0, 1, 2, 3, <=, <=, <=);
STXXL_MERGE4CASE(0, 1, 3, 2, <=, <=, <=);
STXXL_MERGE4CASE(0, 2, 1, 3, <=, <=, <=);
STXXL_MERGE4CASE(0, 2, 3, 1, <=, <=, <=);
STXXL_MERGE4CASE(0, 3, 1, 2, <=, <=, <=);
STXXL_MERGE4CASE(0, 3, 2, 1, <=, <=, <=);
STXXL_MERGE4CASE(1, 0, 2, 3, <, <=, <=);
STXXL_MERGE4CASE(1, 0, 3, 2, <, <=, <=);
STXXL_MERGE4CASE(1, 2, 0, 3, <=, <, <=);
STXXL_MERGE4CASE(1, 2, 3, 0, <=, <=, <);
STXXL_MERGE4CASE(1, 3, 0, 2, <=, <, <=);
STXXL_MERGE4CASE(1, 3, 2, 0, <=, <=, <);
STXXL_MERGE4CASE(2, 0, 1, 3, <, <, <=);
STXXL_MERGE4CASE(2, 0, 3, 1, <, <=, <);
STXXL_MERGE4CASE(2, 1, 0, 3, <, <, <=);
STXXL_MERGE4CASE(2, 1, 3, 0, <, <=, <);
STXXL_MERGE4CASE(2, 3, 0, 1, <=, <, <);
STXXL_MERGE4CASE(2, 3, 1, 0, <=, <, <);
STXXL_MERGE4CASE(3, 0, 1, 2, <, <, <);
STXXL_MERGE4CASE(3, 0, 2, 1, <, <, <);
STXXL_MERGE4CASE(3, 1, 0, 2, <, <, <);
STXXL_MERGE4CASE(3, 1, 2, 0, <, <, <);
STXXL_MERGE4CASE(3, 2, 0, 1, <, <, <);
STXXL_MERGE4CASE(3, 2, 1, 0, <, <, <);
#undef STXXL_MERGE4CASE
#undef STXXL_DECISION
finish:
;
#if STXXL_DEBUG_ASSERTIONS
STXXL_CHECK_EQUAL((seq0.iterator() - seqs_begin[0].first) +
(seq1.iterator() - seqs_begin[1].first) +
(seq2.iterator() - seqs_begin[2].first) +
(seq3.iterator() - seqs_begin[3].first),
orig_length);
#endif
seqs_begin[0].first = seq0.iterator();
seqs_begin[1].first = seq1.iterator();
seqs_begin[2].first = seq2.iterator();
seqs_begin[3].first = seq3.iterator();
return target;
}
template <typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_4_combined(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
STXXL_ASSERT(seqs_end - seqs_begin == 4);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type RandomAccessIteratorPair;
int min_seq;
RandomAccessIterator3 target_end;
DiffType overhang = prepare_unguarded<true>(seqs_begin, seqs_end, comp, min_seq);
DiffType total_length = 0;
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
total_length += iterpair_size(*s);
if (overhang != (DiffType)(-1))
{
DiffType unguarded_length = std::min(length, total_length - overhang);
target_end = multiway_merge_4_variant<unguarded_iterator>
(seqs_begin, seqs_end, target, unguarded_length, comp);
overhang = length - unguarded_length;
}
else
{
// empty sequence found
overhang = length;
target_end = target;
}
STXXL_DEBUG_ASSERT(target_end == target + length - overhang);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
std::vector<RandomAccessIteratorPair> one_missing(seqs_begin, seqs_end);
one_missing.erase(one_missing.begin() + min_seq); //remove
target_end = multiway_merge_3_variant<guarded_iterator>(one_missing.begin(), one_missing.end(), target_end, overhang, comp);
one_missing.insert(one_missing.begin() + min_seq, seqs_begin[min_seq]); //insert back again
std::copy(one_missing.begin(), one_missing.end(), seqs_begin); //write back modified iterators
STXXL_DEBUG_ASSERT(target_end == target + length);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
return target_end;
}
/*!
* Basic multi-way merging procedure.
*
* The head elements are kept in a sorted array, new heads are inserted
* linearly.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \tparam Stable Stable merging incurs a performance penalty.
* \return End iterator of output sequence.
*/
template <bool Stable,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_bubble(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
// num remaining pieces
int k = static_cast<int>(seqs_end - seqs_begin), nrp;
value_type* pl = new value_type[k];
int* source = new int[k];
DiffType total_length = 0;
#define POS(i) seqs_begin[(i)].first
#define STOPS(i) seqs_begin[(i)].second
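// pl[] caches the current head element of every non-empty sequence and
// source[] records which sequence it came from; both arrays are kept sorted
// so pl[0] is always the global minimum (hence the "bubble" name).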
//write entries into queue
nrp = 0;
for (int pi = 0; pi < k; ++pi)
{
if (STOPS(pi) != POS(pi))
{
pl[nrp] = *(POS(pi));
source[nrp] = pi;
++nrp;
total_length += iterpair_size(seqs_begin[pi]);
}
}
if (Stable)
{
for (int pass = 0; pass < nrp - 1; ++pass)
for (int pi = nrp - 1; pi > pass; --pi)
if (comp(pl[pi], pl[pi - 1]) ||
(!comp(pl[pi - 1], pl[pi]) && source[pi] < source[pi - 1]))
{
std::swap(pl[pi - 1], pl[pi]);
std::swap(source[pi - 1], source[pi]);
}
}
else
{
for (int pass = 0; pass < nrp - 1; ++pass)
for (int pi = nrp - 1; pi > pass; --pi)
if (comp(pl[pi], pl[pi - 1]))
{
std::swap(pl[pi - 1], pl[pi]);
std::swap(source[pi - 1], source[pi]);
}
}
// iterate
if (Stable)
{
int j;
while (nrp > 0 && length > 0)
{
if (source[0] < source[1])
{
// pl[0] <= pl[1] ?
while ((nrp == 1 || !(comp(pl[1], pl[0]))) && length > 0)
{
*target = pl[0];
++target;
++POS(source[0]);
--length;
if (POS(source[0]) == STOPS(source[0]))
{
// move everything to the left
for (int s = 0; s < nrp - 1; ++s)
{
pl[s] = pl[s + 1];
source[s] = source[s + 1];
}
--nrp;
break;
}
else
pl[0] = *(POS(source[0]));
}
}
else
{
// pl[0] < pl[1] ?
while ((nrp == 1 || comp(pl[0], pl[1])) && length > 0)
{
*target = pl[0];
++target;
++POS(source[0]);
--length;
if (POS(source[0]) == STOPS(source[0]))
{
for (int s = 0; s < nrp - 1; ++s)
{
pl[s] = pl[s + 1];
source[s] = source[s + 1];
}
--nrp;
break;
}
else
pl[0] = *(POS(source[0]));
}
}
//sink down
j = 1;
while ((j < nrp) && (comp(pl[j], pl[j - 1]) ||
(!comp(pl[j - 1], pl[j]) && (source[j] < source[j - 1]))))
{
std::swap(pl[j - 1], pl[j]);
std::swap(source[j - 1], source[j]);
++j;
}
}
}
else
{
int j;
while (nrp > 0 && length > 0)
{
// pl[0] <= pl[1] ?
while ((nrp == 1 || !comp(pl[1], pl[0])) && length > 0)
{
*target = pl[0];
++target;
++POS(source[0]);
--length;
if (POS(source[0]) == STOPS(source[0]))
{
for (int s = 0; s < (nrp - 1); ++s)
{
pl[s] = pl[s + 1];
source[s] = source[s + 1];
}
--nrp;
break;
}
else
pl[0] = *(POS(source[0]));
}
//sink down
j = 1;
while ((j < nrp) && comp(pl[j], pl[j - 1]))
{
std::swap(pl[j - 1], pl[j]);
std::swap(source[j - 1], source[j]);
++j;
}
}
}
delete[] pl;
delete[] source;
return target;
}
/*!
* Multi-way merging procedure for a high branching factor, guarded case.
*
* The head elements are kept in a loser tree.
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \tparam LoserTreeType Loser tree variant used to hold the head elements.
* \return End iterator of output sequence.
*/
template <typename LoserTreeType,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_loser_tree(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename LoserTreeType::source_type source_type;
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
source_type k = static_cast<source_type>(seqs_end - seqs_begin);
LoserTreeType lt(k, comp);
DiffType total_length = 0;
const value_type* arbitrary_element = NULL;
// find an arbitrary element to avoid default construction
for (source_type t = 0; t < k; ++t)
{
if (!arbitrary_element && iterpair_size(seqs_begin[t]) > 0)
arbitrary_element = &(*seqs_begin[t].first);
total_length += iterpair_size(seqs_begin[t]);
}
for (source_type t = 0; t < k; ++t)
{
if (UNLIKELY(seqs_begin[t].first == seqs_begin[t].second))
lt.insert_start(*arbitrary_element, t, true);
else
lt.insert_start(*seqs_begin[t].first, t, false);
}
lt.init();
total_length = std::min(total_length, length);
for (DiffType i = 0; i < total_length; ++i)
{
// take out
source_type source = lt.get_min_source();
*target = *seqs_begin[source].first;
++target;
++seqs_begin[source].first;
// feed
if (seqs_begin[source].first == seqs_begin[source].second)
lt.delete_min_insert(*arbitrary_element, true);
else
// replace from same source
lt.delete_min_insert(*seqs_begin[source].first, false);
}
return target;
}
/*!
* Multi-way merging procedure for a high branching factor, unguarded case.
* The head elements are kept in a loser tree.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \tparam LoserTreeType Loser tree variant used to hold the head elements.
* \return End iterator of output sequence.
* \pre No input will run out of elements during the merge.
*/
template <typename LoserTreeType,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType,
typename Comparator>
RandomAccessIterator3
multiway_merge_loser_tree_unguarded(
RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
int k = (int)(seqs_end - seqs_begin);
// sentinel is item at end of first sequence.
LoserTreeType lt(k, *(seqs_begin->second - 1), comp);
DiffType total_length = 0;
for (int t = 0; t < k; ++t)
{
assert(seqs_begin[t].first != seqs_begin[t].second);
lt.insert_start(*seqs_begin[t].first, t);
total_length += iterpair_size(seqs_begin[t]);
}
lt.init();
// do not go past end
length = std::min(total_length, length);
int source;
#if STXXL_DEBUG_ASSERTIONS
DiffType i = 0;
#endif
RandomAccessIterator3 target_end = target + length;
while (target < target_end)
{
// take out
source = lt.get_min_source();
#if STXXL_DEBUG_ASSERTIONS
assert(i == 0 || !comp(*(seqs_begin[source].first), *(target - 1)));
#endif
*target = *seqs_begin[source].first;
++seqs_begin[source].first;
++target;
#if STXXL_DEBUG_ASSERTIONS
assert((seqs_begin[source].first != seqs_begin[source].second) || (i == length - 1));
++i;
#endif
// feed
// replace from same source
lt.delete_min_insert(*seqs_begin[source].first);
}
return target;
}
template <bool Stable, class ValueType, class Comparator>
struct loser_tree_traits
{
public:
typedef LoserTreePointer<Stable, ValueType, Comparator> LT;
};
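// For small built-in types, copying values into the loser tree is cheaper
// than chasing pointers, so the specializations below select the copy-based
// tree instead of the pointer-based default.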
#define STXXL_NO_POINTER(T) \
template <bool Stable, class Comparator> \
struct loser_tree_traits<Stable, T, Comparator> \
{ \
typedef LoserTreeCopy<Stable, T, Comparator> LT; \
};
STXXL_NO_POINTER(unsigned char)
STXXL_NO_POINTER(char)
STXXL_NO_POINTER(unsigned short)
STXXL_NO_POINTER(short)
STXXL_NO_POINTER(unsigned int)
STXXL_NO_POINTER(int)
STXXL_NO_POINTER(unsigned long)
STXXL_NO_POINTER(long)
STXXL_NO_POINTER(unsigned long long)
STXXL_NO_POINTER(long long)
#undef STXXL_NO_POINTER
template <bool Stable, class ValueType, class Comparator>
class loser_tree_traits_unguarded
{
public:
typedef LoserTreePointerUnguarded<Stable, ValueType, Comparator> LT;
};
#define STXXL_NO_POINTER_UNGUARDED(T) \
template <bool Stable, class Comparator> \
struct loser_tree_traits_unguarded<Stable, T, Comparator> \
{ \
typedef LoserTreeCopyUnguarded<Stable, T, Comparator> LT; \
};
STXXL_NO_POINTER_UNGUARDED(unsigned char)
STXXL_NO_POINTER_UNGUARDED(char)
STXXL_NO_POINTER_UNGUARDED(unsigned short)
STXXL_NO_POINTER_UNGUARDED(short)
STXXL_NO_POINTER_UNGUARDED(unsigned int)
STXXL_NO_POINTER_UNGUARDED(int)
STXXL_NO_POINTER_UNGUARDED(unsigned long)
STXXL_NO_POINTER_UNGUARDED(long)
STXXL_NO_POINTER_UNGUARDED(unsigned long long)
STXXL_NO_POINTER_UNGUARDED(long long)
#undef STXXL_NO_POINTER_UNGUARDED
template <bool Stable,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_loser_tree_combined(
RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
int min_seq;
RandomAccessIterator3 target_end;
DiffType overhang = prepare_unguarded<Stable>(seqs_begin, seqs_end, comp, min_seq);
DiffType total_length = 0;
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
total_length += iterpair_size(*s);
if (overhang != (DiffType)(-1))
{
DiffType unguarded_length = std::min(length, total_length - overhang);
target_end = multiway_merge_loser_tree_unguarded
<typename loser_tree_traits_unguarded<Stable, value_type, Comparator>::LT>
(seqs_begin, seqs_end, target, unguarded_length, comp);
overhang = length - unguarded_length;
}
else
{
// empty sequence found
overhang = length;
target_end = target;
}
STXXL_DEBUG_ASSERT(target_end == target + length - overhang);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
target_end = multiway_merge_loser_tree
<typename loser_tree_traits<Stable, value_type, Comparator>::LT>
(seqs_begin, seqs_end, target_end, overhang, comp);
STXXL_DEBUG_ASSERT(target_end == target + length);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
return target_end;
}
template <bool Stable,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_loser_tree_sentinel(
RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
// move end of sequences to include the sentinel for merging
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
++(*s).second;
RandomAccessIterator3 target_end
= multiway_merge_loser_tree_unguarded
<typename loser_tree_traits_unguarded<Stable, value_type, Comparator>::LT>
(seqs_begin, seqs_end, target, length, comp);
STXXL_DEBUG_ASSERT(target_end == target + length);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
// restore end of sequences
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
--(*s).second;
return target_end;
}
/*!
* Sequential multi-way merging switch.
*
* The decision is based on the branching factor and runtime settings.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \tparam Stable Stable merging incurs a performance penalty.
* \tparam Sentinels The sequences have a sentinel element.
* \return End iterator of output sequence.
*/
template <bool Stable, bool Sentinels,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
sequential_multiway_merge(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
STXXL_DEBUG_ASSERT(stxxl::is_sorted((*s).first, (*s).second, comp));
RandomAccessIterator3 return_target = target;
int k = static_cast<int>(seqs_end - seqs_begin);
SETTINGS::MultiwayMergeAlgorithm mwma = SETTINGS::multiway_merge_algorithm;
if (!Sentinels && mwma == SETTINGS::LOSER_TREE_SENTINEL)
mwma = SETTINGS::LOSER_TREE_COMBINED;
switch (k)
{
case 0:
break;
case 1:
return_target = std::copy(seqs_begin[0].first,
seqs_begin[0].first + length,
target);
seqs_begin[0].first += length;
break;
case 2:
return_target = merge_advance(
seqs_begin[0].first, seqs_begin[0].second,
seqs_begin[1].first, seqs_begin[1].second,
target, length, comp);
break;
case 3:
switch (mwma)
{
case SETTINGS::LOSER_TREE_COMBINED:
return_target = multiway_merge_3_combined(
seqs_begin, seqs_end, target, length, comp);
break;
case SETTINGS::LOSER_TREE_SENTINEL:
return_target = multiway_merge_3_variant<unguarded_iterator>(
seqs_begin, seqs_end, target, length, comp);
break;
default:
return_target = multiway_merge_3_variant<guarded_iterator>(
seqs_begin, seqs_end, target, length, comp);
break;
}
break;
case 4:
switch (mwma)
{
case SETTINGS::LOSER_TREE_COMBINED:
return_target = multiway_merge_4_combined(
seqs_begin, seqs_end, target, length, comp);
break;
case SETTINGS::LOSER_TREE_SENTINEL:
return_target = multiway_merge_4_variant<unguarded_iterator>(
seqs_begin, seqs_end, target, length, comp);
break;
default:
return_target = multiway_merge_4_variant<guarded_iterator>(
seqs_begin, seqs_end, target, length, comp);
break;
}
break;
default:
{
switch (mwma)
{
case SETTINGS::BUBBLE:
return_target = multiway_merge_bubble<Stable>(
seqs_begin, seqs_end, target, length, comp);
break;
case SETTINGS::LOSER_TREE:
return_target = multiway_merge_loser_tree<
typename loser_tree_traits<Stable, value_type, Comparator>::LT>(
seqs_begin, seqs_end, target, length, comp);
break;
case SETTINGS::LOSER_TREE_COMBINED:
return_target = multiway_merge_loser_tree_combined<Stable>(
seqs_begin, seqs_end, target, length, comp);
break;
case SETTINGS::LOSER_TREE_SENTINEL:
return_target = multiway_merge_loser_tree_sentinel<Stable>(
seqs_begin, seqs_end, target, length, comp);
break;
default:
assert(0 && "multiway_merge algorithm not implemented");
break;
}
}
}
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target + length, comp));
return return_target;
}
/*!
* Splitting method for parallel multi-way merge routine: use sampling and
* binary search for in-exact splitting.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param length Maximum length to merge.
* \param total_length Total length of all sequences combined.
* \param comp Comparator.
* \param chunks Output: subsequence bounds for each of the num_threads threads.
* \param num_threads Number of threads to split the sequences for.
* \tparam Stable Stable merging incurs a performance penalty.
*/
template <bool Stable,
typename RandomAccessIteratorIterator,
typename DiffType,
typename Comparator>
void
parallel_multiway_merge_sampling_splitting(
const RandomAccessIteratorIterator& seqs_begin,
const RandomAccessIteratorIterator& seqs_end,
DiffType length, DiffType total_length, Comparator comp,
std::vector<typename std::iterator_traits<RandomAccessIteratorIterator>::value_type>* chunks,
const thread_index_t num_threads)
{
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
const DiffType num_seqs = seqs_end - seqs_begin;
const DiffType num_samples = num_threads * SETTINGS::merge_oversampling;
// pick samples
value_type* samples = new value_type[num_seqs * num_samples];
for (DiffType s = 0; s < num_seqs; ++s)
{
for (DiffType i = 0; i < num_samples; ++i)
{
DiffType sample_index = static_cast<DiffType>(
double(iterpair_size(seqs_begin[s]))
* (double(i + 1) / double(num_samples + 1))
* (double(length) / double(total_length))
);
samples[s * num_samples + i] = seqs_begin[s].first[sample_index];
}
}
if (Stable)
std::stable_sort(samples, samples + (num_samples * num_seqs), comp);
else
std::sort(samples, samples + (num_samples * num_seqs), comp);
// for each processor
for (thread_index_t slab = 0; slab < num_threads; ++slab)
{
// for each sequence
for (DiffType seq = 0; seq < num_seqs; ++seq)
{
if (slab > 0) {
chunks[slab][seq].first =
std::upper_bound(
seqs_begin[seq].first, seqs_begin[seq].second,
samples[num_samples * num_seqs * slab / num_threads],
comp);
}
else // absolute beginning
chunks[slab][seq].first = seqs_begin[seq].first;
if ((slab + 1) < num_threads) {
chunks[slab][seq].second =
std::upper_bound(
seqs_begin[seq].first, seqs_begin[seq].second,
samples[num_samples * num_seqs * (slab + 1) / num_threads],
comp);
}
else // absolute ending
chunks[slab][seq].second = seqs_begin[seq].second;
}
}
delete[] samples;
}
/*!
* Splitting method for parallel multi-way merge routine: use multisequence
* selection for exact splitting.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param length Maximum length to merge.
* \param total_length Total length of all sequences combined.
* \param comp Comparator.
* \param chunks Output: subsequence bounds for each of the num_threads threads.
* \param num_threads Number of threads to split the sequences for.
* \tparam Stable Stable merging incurs a performance penalty.
*/
template <bool Stable,
typename RandomAccessIteratorIterator,
typename DiffType,
typename Comparator>
void
parallel_multiway_merge_exact_splitting(
const RandomAccessIteratorIterator& seqs_begin,
const RandomAccessIteratorIterator& seqs_end,
DiffType length, DiffType total_length, Comparator comp,
std::vector<typename std::iterator_traits<RandomAccessIteratorIterator>::value_type>* chunks,
const thread_index_t num_threads)
{
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type RandomAccessIteratorPair;
typedef typename RandomAccessIteratorPair
::first_type RandomAccessIterator;
const size_t num_seqs = seqs_end - seqs_begin;
const bool tight = (total_length == length);
std::vector<RandomAccessIterator>* offsets
= new std::vector<RandomAccessIterator>[num_threads];
std::vector<DiffType> ranks(num_threads + 1);
equally_split(length, num_threads, ranks.begin());
for (thread_index_t s = 0; s < (num_threads - 1); ++s)
{
offsets[s].resize(num_seqs);
multiseq_partition(seqs_begin, seqs_end,
ranks[s + 1], offsets[s].begin(), comp);
if (!tight) // last one also needed and available
{
offsets[num_threads - 1].resize(num_seqs);
multiseq_partition(seqs_begin, seqs_end,
length, offsets[num_threads - 1].begin(), comp);
}
}
// for each processor
for (thread_index_t slab = 0; slab < num_threads; ++slab)
{
// for each sequence
for (size_t s = 0; s < num_seqs; ++s)
{
if (slab == 0) // absolute beginning
chunks[slab][s].first = seqs_begin[s].first;
else
chunks[slab][s].first = offsets[slab - 1][s];
if (!tight || slab < (num_threads - 1))
chunks[slab][s].second = offsets[slab][s];
else // slab == num_threads - 1
chunks[slab][s].second = seqs_begin[s].second;
}
}
delete[] offsets;
}
#if STXXL_PARALLEL
/*!
* Parallel multi-way merge routine.
*
* The decision is based on the branching factor and runtime settings.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \tparam Stable Stable merging incurs a performance penalty.
* \return End iterator of output sequence.
*/
template <bool Stable,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType,
typename Comparator>
RandomAccessIterator3
parallel_multiway_merge(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, const DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type RandomAccessIteratorPair;
for (RandomAccessIteratorIterator rii = seqs_begin; rii != seqs_end; ++rii)
STXXL_DEBUG_ASSERT(stxxl::is_sorted((*rii).first, (*rii).second, comp));
// leave only non-empty sequences
std::vector<RandomAccessIteratorPair> seqs_ne;
seqs_ne.reserve(seqs_end - seqs_begin);
DiffType total_length = 0;
for (RandomAccessIteratorIterator raii = seqs_begin; raii != seqs_end; ++raii)
{
DiffType seq_length = iterpair_size(*raii);
if (seq_length > 0) {
total_length += seq_length;
seqs_ne.push_back(*raii);
}
}
size_t num_seqs = seqs_ne.size();
STXXL_PARALLEL_PCALL(total_length);
if (total_length == 0 || num_seqs == 0)
return target;
thread_index_t num_threads = static_cast<thread_index_t>(
std::min(static_cast<DiffType>(SETTINGS::num_threads), total_length));
Timing<inactive_tag>* t = new Timing<inactive_tag>[num_threads];
for (int pr = 0; pr < num_threads; ++pr)
t[pr].tic();
// thread t will have to merge chunks[iam][0..k - 1]
std::vector<RandomAccessIteratorPair>* chunks
= new std::vector<RandomAccessIteratorPair>[num_threads];
for (int s = 0; s < num_threads; ++s)
chunks[s].resize(num_seqs);
#pragma omp parallel num_threads(num_threads)
{
#pragma omp single
{
if (SETTINGS::multiway_merge_splitting == SETTINGS::SAMPLING)
{
parallel_multiway_merge_sampling_splitting<Stable>(
seqs_ne.begin(), seqs_ne.end(),
length, total_length, comp,
chunks, num_threads);
}
else // (SETTINGS::multiway_merge_splitting == SETTINGS::EXACT)
{
parallel_multiway_merge_exact_splitting<Stable>(
seqs_ne.begin(), seqs_ne.end(),
length, total_length, comp,
chunks, num_threads);
}
}
thread_index_t iam = omp_get_thread_num();
t[iam].tic();
DiffType target_position = 0, local_length = 0;
for (size_t s = 0; s < num_seqs; ++s)
{
target_position += chunks[iam][s].first - seqs_ne[s].first;
local_length += iterpair_size(chunks[iam][s]);
}
sequential_multiway_merge<Stable, false>(
chunks[iam].begin(), chunks[iam].end(),
target + target_position,
std::min(local_length, length - target_position),
comp);
t[iam].tic();
}
for (int pr = 0; pr < num_threads; ++pr)
t[pr].tic();
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target + length, comp));
// advance the begin iterators of the input sequences past the merged elements
size_t count_seqs = 0;
for (RandomAccessIteratorIterator raii = seqs_begin; raii != seqs_end; ++raii)
{
DiffType seq_length = iterpair_size(*raii);
if (seq_length > 0)
raii->first = chunks[num_threads - 1][count_seqs++].second;
}
STXXL_DEBUG_ASSERT(count_seqs == num_seqs);
delete[] chunks;
for (int pr = 0; pr < num_threads; ++pr)
t[pr].tic();
for (int pr = 0; pr < num_threads; ++pr)
t[pr].print();
delete[] t;
return target + length;
}
/*!
* Multi-way merging front-end with unstable mode and without sentinels.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param comp Comparator.
* \param length Maximum length to merge.
* \return End iterator of output sequence.
*/
template <typename RandomAccessIteratorPairIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge(RandomAccessIteratorPairIterator seqs_begin,
RandomAccessIteratorPairIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
if (seqs_begin == seqs_end)
return target;
RandomAccessIterator3 target_end;
if (STXXL_PARALLEL_CONDITION(
((seqs_end - seqs_begin) >= SETTINGS::multiway_merge_minimal_k) &&
((sequence_index_t)length >= SETTINGS::multiway_merge_minimal_n)
))
target_end = parallel_multiway_merge<false>(
seqs_begin, seqs_end, target, length, comp);
else
target_end = sequential_multiway_merge<false, false>(
seqs_begin, seqs_end, target, length, comp);
return target_end;
}
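/* Usage sketch (illustrative only; array names chosen here): merging three
* sorted ranges through this front-end:
*
* typedef std::pair<int*, int*> seq_type;
* int a[] = { 1, 4, 7 }, b[] = { 2, 5, 8 }, c[] = { 3, 6, 9 };
* seq_type seqs[] = { seq_type(a, a + 3), seq_type(b, b + 3), seq_type(c, c + 3) };
* int out[9];
* stxxl::parallel::multiway_merge(seqs, seqs + 3, out, 9, std::less<int>());
* // out now holds 1..9 and each seqs[i].first has been advanced to its end.
*/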
/*!
* Multi-way merging front-end with stable mode and without sentinels.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param comp Comparator.
* \param length Maximum length to merge.
* \return End iterator of output sequence.
*/
template <typename RandomAccessIteratorPairIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_stable(RandomAccessIteratorPairIterator seqs_begin,
RandomAccessIteratorPairIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
if (seqs_begin == seqs_end)
return target;
RandomAccessIterator3 target_end;
if (STXXL_PARALLEL_CONDITION(
((seqs_end - seqs_begin) >= SETTINGS::multiway_merge_minimal_k) &&
((sequence_index_t)length >= SETTINGS::multiway_merge_minimal_n)
))
target_end = parallel_multiway_merge<true>(
seqs_begin, seqs_end, target, length, comp);
else
target_end = sequential_multiway_merge<true, false>(
seqs_begin, seqs_end, target, length, comp);
return target_end;
}
/*!
* Multi-way merging front-end with unstable mode and sentinels.
*
* Each sequence must be suffixed with a sentinel as *end(), one item beyond
* the end of each sequence.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param comp Comparator.
* \param length Maximum length to merge.
* \return End iterator of output sequence.
* \pre For each \c i, \c seqs_begin[i].second must be the end marker of the
* sequence; one additional sentinel element must be stored directly behind it.
*/
template <typename RandomAccessIteratorPairIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_sentinels(RandomAccessIteratorPairIterator seqs_begin,
RandomAccessIteratorPairIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
if (seqs_begin == seqs_end)
return target;
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
if (STXXL_PARALLEL_CONDITION(
((seqs_end - seqs_begin) >= SETTINGS::multiway_merge_minimal_k) &&
((sequence_index_t)length >= SETTINGS::multiway_merge_minimal_n)
))
return parallel_multiway_merge<false>(
seqs_begin, seqs_end, target, length, comp);
else
return sequential_multiway_merge<false, true>(
seqs_begin, seqs_end, target, length, comp);
}
/*!
* Multi-way merging front-end with stable mode and sentinels.
*
* Each sequence must be suffixed with a sentinel as *end(), one item beyond
* the end of each sequence.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param comp Comparator.
* \param length Maximum length to merge.
* \return End iterator of output sequence.
* \pre For each \c i, \c seqs_begin[i].second must be the end marker of the
* sequence; one additional sentinel element must be stored directly behind it.
*/
template <typename RandomAccessIteratorPairIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_stable_sentinels(RandomAccessIteratorPairIterator seqs_begin,
RandomAccessIteratorPairIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
if (seqs_begin == seqs_end)
return target;
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
if (STXXL_PARALLEL_CONDITION(
((seqs_end - seqs_begin) >= SETTINGS::multiway_merge_minimal_k) &&
((sequence_index_t)length >= SETTINGS::multiway_merge_minimal_n)
))
return parallel_multiway_merge<true>(
seqs_begin, seqs_end, target, length, comp);
else
return sequential_multiway_merge<true, true>(
seqs_begin, seqs_end, target, length, comp);
}
#endif // STXXL_PARALLEL
} // namespace parallel
STXXL_END_NAMESPACE
#endif // !STXXL_PARALLEL_MULTIWAY_MERGE_HEADER
|
convolutiondepthwise_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static void convdw3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
const float bias0 = bias ? bias[g] : 0.f;
const float* kernel0 = kernel + g*9;
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(g);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
#if __ARM_NEON
float32x4_t _k012x = vld1q_f32(kernel0);
float32x4_t _k345x = vld1q_f32(kernel0+3);
float32x4_t _k678x = vld1q_f32(kernel0+6);
_k012x = vsetq_lane_f32(0.f, _k012x, 3);
_k345x = vsetq_lane_f32(0.f, _k345x, 3);
_k678x = vsetq_lane_f32(0.f, _k678x, 3);
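// Lane 3 of each kernel vector is zeroed so that whole-vector multiplies only
// accumulate the three real taps per row; the per-pixel tail below relies on
// this when it multiplies full 4-float loads by these kernel vectors.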
float32x4_t _bias0 = vdupq_n_f32(bias0);
#else
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#endif // __ARM_NEON
int i = 0;
for (; i+1 < outh; i+=2)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r00n = vld1q_f32(r0 + 4);
float32x4_t _r01 = vextq_f32(_r00, _r00n, 1);
float32x4_t _r02 = vextq_f32(_r00, _r00n, 2);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r10n = vld1q_f32(r1 + 4);
float32x4_t _r11 = vextq_f32(_r10, _r10n, 1);
float32x4_t _r12 = vextq_f32(_r10, _r10n, 2);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r20n = vld1q_f32(r2 + 4);
float32x4_t _r21 = vextq_f32(_r20, _r20n, 1);
float32x4_t _r22 = vextq_f32(_r20, _r20n, 2);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r30n = vld1q_f32(r3 + 4);
float32x4_t _r31 = vextq_f32(_r30, _r30n, 1);
float32x4_t _r32 = vextq_f32(_r30, _r30n, 2);
float32x4_t _sum1 = vmulq_laneq_f32(_r00, _k012x, 0);
float32x4_t _sum2 = vmlaq_laneq_f32(_bias0, _r01, _k012x, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _r02, _k012x, 2);
_sum2 = vfmaq_laneq_f32(_sum2, _r10, _k345x, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _r11, _k345x, 1);
_sum2 = vfmaq_laneq_f32(_sum2, _r12, _k345x, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _r20, _k678x, 0);
_sum2 = vfmaq_laneq_f32(_sum2, _r21, _k678x, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _r22, _k678x, 2);
float32x4_t _sum3 = vmulq_laneq_f32(_r10, _k012x, 0);
float32x4_t _sum4 = vmlaq_laneq_f32(_bias0, _r11, _k012x, 1);
_sum3 = vfmaq_laneq_f32(_sum3, _r12, _k012x, 2);
_sum4 = vfmaq_laneq_f32(_sum4, _r20, _k345x, 0);
_sum3 = vfmaq_laneq_f32(_sum3, _r21, _k345x, 1);
_sum4 = vfmaq_laneq_f32(_sum4, _r22, _k345x, 2);
_sum3 = vfmaq_laneq_f32(_sum3, _r30, _k678x, 0);
_sum4 = vfmaq_laneq_f32(_sum4, _r31, _k678x, 1);
_sum3 = vfmaq_laneq_f32(_sum3, _r32, _k678x, 2);
_sum1 = vaddq_f32(_sum1, _sum2);
_sum3 = vaddq_f32(_sum3, _sum4);
vst1q_f32(outptr, _sum1);
vst1q_f32(outptr2, _sum3);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
outptr += 4;
outptr2 += 4;
}
#else
if (nn > 0)
{
asm volatile(
"pld [%3, #192] \n"
"vld1.f32 {d18-d20}, [%3 :64] \n"// r0
"add %3, #16 \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"0: \n"
"vmul.f32 q7, q9, %e14[0] \n"
"vand q13, %q17, %q17 \n"// q13 = _bias0
"vmul.f32 q6, q11, %e14[1] \n"
"vmla.f32 q13, q12, %f14[0] \n"
"pld [%4, #192] \n"
"vld1.f32 {d18-d20}, [%4] \n"// r1
"add %4, #16 \n"
"vmla.f32 q7, q9, %e15[0] \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"vmla.f32 q6, q11, %e15[1] \n"
"vmla.f32 q13, q12, %f15[0] \n"
"vmul.f32 q8, q9, %e14[0] \n"
"vand q15, %q17, %q17 \n"// q15 = _bias0
"vmul.f32 q14, q11, %e14[1] \n"
"vmla.f32 q15, q12, %f14[0] \n"
"pld [%5, #192] \n"
"vld1.f32 {d18-d20}, [%5 :64] \n"// r2
"add %5, #16 \n"
"vmla.f32 q7, q9, %e16[0] \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"vmla.f32 q6, q11, %e16[1] \n"
"vmla.f32 q13, q12, %f16[0] \n"
"vmla.f32 q8, q9, %e15[0] \n"
"vmla.f32 q14, q11, %e15[1] \n"
"vmla.f32 q15, q12, %f15[0] \n"
"pld [%6, #192] \n"
"vld1.f32 {d18-d20}, [%6] \n"// r3
"add %6, #16 \n"
"vmla.f32 q8, q9, %e16[0] \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"vmla.f32 q14, q11, %e16[1] \n"
"vmla.f32 q15, q12, %f16[0] \n"
"vadd.f32 q7, q7, q6 \n"
"pld [%3, #192] \n"
"vld1.f32 {d18-d20}, [%3 :64] \n"// r0
"vadd.f32 q8, q8, q14 \n"
"vadd.f32 q7, q7, q13 \n"
"vadd.f32 q8, q8, q15 \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"add %3, #16 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"vst1.f32 {d16-d17}, [%2]! \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %3, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"w"(_k012x), // %14
"w"(_k345x), // %15
"w"(_k678x), // %16
"w"(_bias0) // %17
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
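            // Remainder columns: each iteration still produces one pixel for
            // both output rows (outptr and outptr2) from the four input rows
            // r0..r3, one column at a time.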
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _sum = vmulq_f32(_r00, _k012x);
_sum = vmlaq_f32(_sum, _r10, _k345x);
_sum = vmlaq_f32(_sum, _r20, _k678x);
float32x4_t _sum2 = vmulq_f32(_r10, _k012x);
_sum2 = vmlaq_f32(_sum2, _r20, _k345x);
_sum2 = vmlaq_f32(_sum2, _r30, _k678x);
_sum = vsetq_lane_f32(bias0, _sum, 3);
_sum2 = vsetq_lane_f32(bias0, _sum2, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
*outptr2 = vaddvq_f32(_sum2);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
float32x2_t _sss2 = vpadd_f32(_ss, _ss2);
*outptr = vget_lane_f32(_sss2, 0);
*outptr2 = vget_lane_f32(_sss2, 1);
#endif // __aarch64__
#else
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
float sum2 = bias0;
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
*outptr = sum;
*outptr2 = sum2;
#endif
r0++;
r1++;
r2++;
r3++;
outptr++;
outptr2++;
}
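            // Skip the 2 right-edge input columns (outw == w - 2) plus one
            // extra input row: this iteration produced two output rows.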
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r00n = vld1q_f32(r0 + 4);
float32x4_t _r01 = vextq_f32(_r00, _r00n, 1);
float32x4_t _r02 = vextq_f32(_r00, _r00n, 2);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r10n = vld1q_f32(r1 + 4);
float32x4_t _r11 = vextq_f32(_r10, _r10n, 1);
float32x4_t _r12 = vextq_f32(_r10, _r10n, 2);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r20n = vld1q_f32(r2 + 4);
float32x4_t _r21 = vextq_f32(_r20, _r20n, 1);
float32x4_t _r22 = vextq_f32(_r20, _r20n, 2);
float32x4_t _sum1 = vmulq_laneq_f32(_r00, _k012x, 0);
float32x4_t _sum2 = vmulq_laneq_f32(_r01, _k012x, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _r02, _k012x, 2);
_sum2 = vfmaq_laneq_f32(_sum2, _r10, _k345x, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _r11, _k345x, 1);
_sum2 = vfmaq_laneq_f32(_sum2, _r12, _k345x, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _r20, _k678x, 0);
_sum2 = vfmaq_laneq_f32(_sum2, _r21, _k678x, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _r22, _k678x, 2);
_sum1 = vaddq_f32(_sum1, _sum2);
vst1q_f32(outptr, _sum1);
r0 += 4;
r1 += 4;
r2 += 4;
outptr += 4;
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #192] \n"
"vld1.f32 {d16-d18}, [%2] \n"// r0
"add %2, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"0: \n"
"vmul.f32 q7, q8, %e10[0] \n"
"vand q14, %q13, %q13 \n"// q14 = _bias0
"vmul.f32 q13, q10, %e10[1] \n"
"vmla.f32 q14, q11, %f10[0] \n"
"pld [%3, #192] \n"
"vld1.f32 {d16-d18}, [%3] \n"// r1
"add %3, #16 \n"
"vmla.f32 q7, q8, %e11[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q13, q10, %e11[1] \n"
"vmla.f32 q14, q11, %f11[0] \n"
"pld [%4, #192] \n"
"vld1.f32 {d16-d18}, [%4] \n"// r2
"add %4, #16 \n"
"vmla.f32 q7, q8, %e12[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q13, q10, %e12[1] \n"
"vmla.f32 q14, q11, %f12[0] \n"
"pld [%2, #192] \n"
"vld1.f32 {d16-d18}, [%2] \n"// r0
"add %2, #16 \n"
"vadd.f32 q7, q7, q13 \n"
"vadd.f32 q7, q7, q14 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %2, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k012x), // %10
"w"(_k345x), // %11
"w"(_k678x), // %12
"w"(_bias0) // %13
: "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum = vmulq_f32(_r00, _k012x);
_sum = vmlaq_f32(_sum, _r10, _k345x);
_sum = vmlaq_f32(_sum, _r20, _k678x);
_sum = vsetq_lane_f32(bias0, _sum, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
*outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr = sum;
#endif
r0++;
r1++;
r2++;
outptr++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
static void convdw3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const int tailstep = w - 2*outw + w;
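    // stride 2: each output row consumes 2*outw input columns; tailstep skips
    // the leftover (w - 2*outw) columns plus one full input row, since the
    // stride also advances vertically by two rows per output row.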
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
const float bias0 = bias ? bias[g] : 0.f;
const float* kernel0 = kernel + g*9;
float* outptr = out;
const float* img0 = bottom_blob.channel(g);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
#if __ARM_NEON
float32x4_t _k012x = vld1q_f32(kernel0);
float32x4_t _k345x = vld1q_f32(kernel0+3);
float32x4_t _k678x = vld1q_f32(kernel0+6);
_k012x = vsetq_lane_f32(0.f, _k012x, 3);
_k345x = vsetq_lane_f32(0.f, _k345x, 3);
_k678x = vsetq_lane_f32(0.f, _k678x, 3);
float32x4_t _bias0 = vdupq_n_f32(bias0);
#else
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#endif // __ARM_NEON
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4x2_t _r0 = vld2q_f32(r0);
float32x4x2_t _r0n = vld2q_f32(r0+8);
float32x4_t _r00 = _r0.val[0];// 0 2 4 6
float32x4_t _r01 = _r0.val[1];// 1 3 5 7
float32x4_t _r02 = vextq_f32(_r00, _r0n.val[0], 1);// 2 4 6 8
float32x4_t _outp = vfmaq_laneq_f32(_bias0, _r00, _k012x, 0);
_outp = vfmaq_laneq_f32(_outp, _r01, _k012x, 1);
_outp = vfmaq_laneq_f32(_outp, _r02, _k012x, 2);
float32x4x2_t _r1 = vld2q_f32(r1);
float32x4x2_t _r1n = vld2q_f32(r1+8);
float32x4_t _r10 = _r1.val[0];
float32x4_t _r11 = _r1.val[1];
float32x4_t _r12 = vextq_f32(_r10, _r1n.val[0], 1);
_outp = vfmaq_laneq_f32(_outp, _r10, _k345x, 0);
_outp = vfmaq_laneq_f32(_outp, _r11, _k345x, 1);
_outp = vfmaq_laneq_f32(_outp, _r12, _k345x, 2);
float32x4x2_t _r2 = vld2q_f32(r2);
float32x4x2_t _r2n = vld2q_f32(r2+8);
float32x4_t _r20 = _r2.val[0];
float32x4_t _r21 = _r2.val[1];
float32x4_t _r22 = vextq_f32(_r20, _r2n.val[0], 1);
_outp = vfmaq_laneq_f32(_outp, _r20, _k678x, 0);
_outp = vfmaq_laneq_f32(_outp, _r21, _k678x, 1);
_outp = vfmaq_laneq_f32(_outp, _r22, _k678x, 2);
vst1q_f32(outptr, _outp);
r0 += 8;
r1 += 8;
r2 += 8;
outptr += 4;
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"vand q11, %q13, %q13 \n"
"0: \n"
"vmul.f32 q0, q2, %e10[0] \n"
"vmul.f32 q10, q3, %e10[1] \n"
"pld [%2, #128] \n"
"vld2.f32 {d16-d17}, [%2] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmla.f32 q11, q1, %f10[0] \n"
"pld [%3, #256] \n"
"vld2.f32 {d4-d7}, [%3]! \n"
"vmla.f32 q0, q2, %e11[0] \n"
"vmla.f32 q10, q3, %e11[1] \n"
"pld [%3, #128] \n"
"vld2.f32 {d16-d17}, [%3] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmla.f32 q11, q1, %f11[0] \n"
"pld [%4, #256] \n"
"vld2.f32 {d4-d7}, [%4]! \n"
"vmla.f32 q0, q2, %e12[0] \n"
"vmla.f32 q10, q3, %e12[1] \n"
"pld [%4, #128] \n"
"vld2.f32 {d16-d17}, [%4] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmla.f32 q11, q1, %f12[0] \n"
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"vadd.f32 q0, q0, q10 \n"
"vadd.f32 q0, q0, q11 \n"
"vand q11, %q13, %q13 \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d1}, [%1]! \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k012x), // %10
"w"(_k345x), // %11
"w"(_k678x), // %12
"w"(_bias0) // %13
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum = vmulq_f32(_r00, _k012x);
_sum = vmlaq_f32(_sum, _r10, _k345x);
_sum = vmlaq_f32(_sum, _r20, _k678x);
_sum = vsetq_lane_f32(bias0, _sum, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
*outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr = sum;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
csr_matvec.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "seq_mv.h"
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec
*--------------------------------------------------------------------------*/
/* y[offset:end] = alpha*A[offset:end,:]*x + beta*b[offset:end] */
HYPRE_Int
hypre_CSRMatrixMatvecOutOfPlaceHost( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *b,
hypre_Vector *y,
HYPRE_Int offset )
{
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A) + offset;
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A) - offset;
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
/*HYPRE_Int num_nnz = hypre_CSRMatrixNumNonzeros(A);*/
HYPRE_Int *A_rownnz = hypre_CSRMatrixRownnz(A);
HYPRE_Int num_rownnz = hypre_CSRMatrixNumRownnz(A);
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *b_data = hypre_VectorData(b) + offset;
HYPRE_Complex *y_data = hypre_VectorData(y) + offset;
HYPRE_Int x_size = hypre_VectorSize(x);
HYPRE_Int b_size = hypre_VectorSize(b) - offset;
HYPRE_Int y_size = hypre_VectorSize(y) - offset;
HYPRE_Int num_vectors = hypre_VectorNumVectors(x);
HYPRE_Int idxstride_y = hypre_VectorIndexStride(y);
HYPRE_Int vecstride_y = hypre_VectorVectorStride(y);
/*HYPRE_Int idxstride_b = hypre_VectorIndexStride(b);
HYPRE_Int vecstride_b = hypre_VectorVectorStride(b);*/
HYPRE_Int idxstride_x = hypre_VectorIndexStride(x);
HYPRE_Int vecstride_x = hypre_VectorVectorStride(x);
HYPRE_Complex temp, tempx;
HYPRE_Int i, j, jj, m, ierr=0;
HYPRE_Real xpar=0.7;
hypre_Vector *x_tmp = NULL;
/*---------------------------------------------------------------------
* Check for size compatibility. Matvec returns ierr = 1 if
* length of X doesn't equal the number of columns of A,
* ierr = 2 if the length of Y doesn't equal the number of rows
* of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in Matvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
hypre_assert( num_vectors == hypre_VectorNumVectors(b) );
if (num_cols != x_size)
ierr = 1;
if (num_rows != y_size || num_rows != b_size)
ierr = 2;
if (num_cols != x_size && (num_rows != y_size || num_rows != b_size))
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] = beta*b_data[i];
      /* note: time_begin is not declared in this host routine; profiling of
         this early-exit path is handled by the hypre_CSRMatrixMatvecOutOfPlace
         wrapper, which owns time_begin */
return ierr;
}
if (x == y)
{
x_tmp = hypre_SeqVectorCloneDeep(x);
x_data = hypre_VectorData(x_tmp);
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
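   /* factor out alpha: y = alpha*(A*x + temp*b) with temp = beta/alpha, so the
      kernels below only need to special-case temp in {0, -1, 1} and scale once
      at the end */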
   /* use the rownnz pointer for the A*x multiplication when the number of
      nonzero rows is small (num_rownnz < xpar*num_rows, xpar = 0.7); the
      multivector case also takes this path */
if (num_rownnz < xpar*(num_rows) || num_vectors > 1)
{
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] = b_data[i]*temp;
}
}
else
{
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] = b_data[i];
}
/*-----------------------------------------------------------------
* y += A*x
*-----------------------------------------------------------------*/
if (num_rownnz < xpar*(num_rows))
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jj,m,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
/*
* for (jj = A_i[m]; jj < A_i[m+1]; jj++)
* {
* j = A_j[jj];
* y_data[m] += A_data[jj] * x_data[j];
* } */
if ( num_vectors==1 )
{
tempx = 0;
for (jj = A_i[m]; jj < A_i[m+1]; jj++)
tempx += A_data[jj] * x_data[A_j[jj]];
y_data[m] += tempx;
}
else
for ( j=0; j<num_vectors; ++j )
{
tempx = 0;
for (jj = A_i[m]; jj < A_i[m+1]; jj++)
tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
y_data[ j*vecstride_y + m*idxstride_y] += tempx;
}
}
}
else // num_vectors > 1
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jj,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
for (j = 0; j < num_vectors; ++j)
{
tempx = 0;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
}
y_data[ j*vecstride_y + i*idxstride_y ] += tempx;
}
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] *= alpha;
}
}
else
{ // JSP: this is currently the only path optimized
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,jj,tempx)
#endif
{
HYPRE_Int iBegin = hypre_CSRMatrixGetLoadBalancedPartitionBegin(A);
HYPRE_Int iEnd = hypre_CSRMatrixGetLoadBalancedPartitionEnd(A);
hypre_assert(iBegin <= iEnd);
hypre_assert(iBegin >= 0 && iBegin <= num_rows);
hypre_assert(iEnd >= 0 && iEnd <= num_rows);
if (0 == temp)
{
if (1 == alpha) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = A*x
else if (-1 == alpha)
{
for (i = iBegin; i < iEnd; i++)
{
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx -= A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = -A*x
else
{
for (i = iBegin; i < iEnd; i++)
{
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = alpha*tempx;
}
} // y = alpha*A*x
} // temp == 0
else if (-1 == temp) // beta == -alpha
{
if (1 == alpha) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
tempx = -b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = A*x - y
else if (-1 == alpha) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
tempx = b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx -= A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = -A*x + y
else
{
for (i = iBegin; i < iEnd; i++)
{
tempx = -b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = alpha*tempx;
}
} // y = alpha*(A*x - y)
} // temp == -1
else if (1 == temp)
{
if (1 == alpha) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
tempx = b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = A*x + y
else if (-1 == alpha)
{
for (i = iBegin; i < iEnd; i++)
{
tempx = -b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx -= A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = -A*x - y
else
{
for (i = iBegin; i < iEnd; i++)
{
tempx = b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = alpha*tempx;
}
} // y = alpha*(A*x + y)
}
else
{
if (1 == alpha) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
tempx = b_data[i]*temp;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = A*x + temp*y
else if (-1 == alpha)
{
for (i = iBegin; i < iEnd; i++)
{
tempx = -b_data[i]*temp;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx -= A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = -A*x - temp*y
else
{
for (i = iBegin; i < iEnd; i++)
{
tempx = b_data[i]*temp;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = alpha*tempx;
}
} // y = alpha*(A*x + temp*y)
} // temp != 0 && temp != -1 && temp != 1
} // omp parallel
}
if (x == y)
{
hypre_SeqVectorDestroy(x_tmp);
}
return ierr;
}
HYPRE_Int
hypre_CSRMatrixMatvecOutOfPlace( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *b,
hypre_Vector *y,
HYPRE_Int offset )
{
#ifdef HYPRE_PROFILE
HYPRE_Real time_begin = hypre_MPI_Wtime();
#endif
HYPRE_Int ierr = 0;
#if defined(HYPRE_USING_GPU)
//HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );
//RL: TODO back to hypre_GetExecPolicy1 later
HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;
if (exec == HYPRE_EXEC_DEVICE)
{
ierr = hypre_CSRMatrixMatvecDevice(0, alpha, A, x, beta, b, y, offset);
}
else
#endif
{
ierr = hypre_CSRMatrixMatvecOutOfPlaceHost(alpha, A, x, beta, b, y, offset);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin;
#endif
return ierr;
}
HYPRE_Int
hypre_CSRMatrixMatvec( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y )
{
return hypre_CSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y, 0);
}
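/* Usage sketch (illustrative; assumes A, x, y were created and initialized
 * elsewhere with the usual seq_mv constructors):
 *
 *    hypre_CSRMatrixMatvec( 1.0, A, x, 0.0, y);   // y = A*x
 *    hypre_CSRMatrixMatvec(-1.0, A, x, 1.0, y);   // y = y - A*x
 */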
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvecT
*
* This version is using a different (more efficient) threading scheme
* Performs y <- alpha * A^T * x + beta * y
*
* From Van Henson's modification of hypre_CSRMatrixMatvec.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixMatvecTHost( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y )
{
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int x_size = hypre_VectorSize(x);
HYPRE_Int y_size = hypre_VectorSize(y);
HYPRE_Int num_vectors = hypre_VectorNumVectors(x);
HYPRE_Int idxstride_y = hypre_VectorIndexStride(y);
HYPRE_Int vecstride_y = hypre_VectorVectorStride(y);
HYPRE_Int idxstride_x = hypre_VectorIndexStride(x);
HYPRE_Int vecstride_x = hypre_VectorVectorStride(x);
HYPRE_Complex temp;
HYPRE_Complex *y_data_expand;
HYPRE_Int my_thread_num = 0, offset = 0;
HYPRE_Int i, j, jv, jj;
HYPRE_Int num_threads;
HYPRE_Int ierr = 0;
hypre_Vector *x_tmp = NULL;
/*---------------------------------------------------------------------
* Check for size compatibility. MatvecT returns ierr = 1 if
* length of X doesn't equal the number of rows of A,
* ierr = 2 if the length of Y doesn't equal the number of
* columns of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in MatvecT, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
if (num_rows != x_size)
ierr = 1;
if (num_cols != y_size)
ierr = 2;
if (num_rows != x_size && num_cols != y_size)
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= beta;
return ierr;
}
if (x == y)
{
x_tmp = hypre_SeqVectorCloneDeep(x);
x_data = hypre_VectorData(x_tmp);
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A^T*x
*-----------------------------------------------------------------*/
num_threads = hypre_NumThreads();
if (num_threads > 1)
{
y_data_expand = hypre_CTAlloc(HYPRE_Complex, num_threads*y_size, HYPRE_MEMORY_HOST);
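      /* each thread scatters its A^T*x contributions into a private
         length-y_size slice of y_data_expand, avoiding write conflicts on
         y_data; the slices are reduced into y_data below */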
if ( num_vectors==1 )
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,jj,j,my_thread_num,offset)
#endif
{
my_thread_num = hypre_GetThreadNum();
offset = y_size*my_thread_num;
#ifdef HYPRE_USING_OPENMP
#pragma omp for HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data_expand[offset + j] += A_data[jj] * x_data[i];
}
}
/* implied barrier (for threads)*/
#ifdef HYPRE_USING_OPENMP
#pragma omp for HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < y_size; i++)
{
for (j = 0; j < num_threads; j++)
{
y_data[i] += y_data_expand[j*y_size + i];
}
}
} /* end parallel threaded region */
}
else
{
/* multiple vector case is not threaded */
for (i = 0; i < num_rows; i++)
{
for ( jv=0; jv<num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[ j*idxstride_y + jv*vecstride_y ] +=
A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x];
}
}
}
}
hypre_TFree(y_data_expand, HYPRE_MEMORY_HOST);
}
else
{
for (i = 0; i < num_rows; i++)
{
if ( num_vectors==1 )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[j] += A_data[jj] * x_data[i];
}
}
else
{
for ( jv=0; jv<num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[ j*idxstride_y + jv*vecstride_y ] +=
A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x ];
}
}
}
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols*num_vectors; i++)
{
y_data[i] *= alpha;
}
}
if (x == y)
{
hypre_SeqVectorDestroy(x_tmp);
}
return ierr;
}
HYPRE_Int
hypre_CSRMatrixMatvecT( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
HYPRE_Real time_begin = hypre_MPI_Wtime();
#endif
HYPRE_Int ierr = 0;
#if defined(HYPRE_USING_GPU)
//HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );
//RL: TODO back to hypre_GetExecPolicy1 later
HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;
if (exec == HYPRE_EXEC_DEVICE)
{
ierr = hypre_CSRMatrixMatvecDevice(1, alpha, A, x, beta, y, y, 0 );
}
else
#endif
{
ierr = hypre_CSRMatrixMatvecTHost(alpha, A, x, beta, y);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin;
#endif
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec_FF
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixMatvec_FF( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y,
HYPRE_Int *CF_marker_x,
HYPRE_Int *CF_marker_y,
HYPRE_Int fpt )
{
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int x_size = hypre_VectorSize(x);
HYPRE_Int y_size = hypre_VectorSize(y);
HYPRE_Complex temp;
HYPRE_Int i, jj;
HYPRE_Int ierr = 0;
/*---------------------------------------------------------------------
* Check for size compatibility. Matvec returns ierr = 1 if
* length of X doesn't equal the number of columns of A,
* ierr = 2 if the length of Y doesn't equal the number of rows
* of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in Matvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
if (num_cols != x_size)
ierr = 1;
if (num_rows != y_size)
ierr = 2;
if (num_cols != x_size && num_rows != y_size)
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= beta;
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A*x
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,jj) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
if (CF_marker_x[i] == fpt)
{
temp = y_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]];
y_data[i] = temp;
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= alpha;
}
return ierr;
}
|
TimeDiscretization.h | /**
* @file TimeDiscretization.h
* @author N. Fottner
* @date 13/05/19
*/
#pragma once
#include "autopas/AutoPas.h"
#include "autopas/utils/ArrayMath.h"
/**
* Functions for updating velocities and positions as simulation time progresses.
*/
namespace TimeDiscretization {
/**
 * Calculate and update the position for every particle using the Störmer-Verlet Algorithm.
 * @param autopas AutoPas container holding the particles to update.
 * @param particlePropertiesLibrary Library providing the mass of each particle type.
 * @param deltaT time step width
 */
template <class AutoPasTemplate, class ParticlePropertiesLibraryTemplate>
void calculatePositions(AutoPasTemplate &autopas, const ParticlePropertiesLibraryTemplate &particlePropertiesLibrary,
const double deltaT) {
#ifdef AUTOPAS_OPENMP
#pragma omp parallel
#endif
for (auto iter = autopas.begin(autopas::IteratorBehavior::ownedOnly); iter.isValid(); ++iter) {
auto v = iter->getV();
auto m = particlePropertiesLibrary.getMass(iter->getTypeId());
auto f = iter->getF();
iter->setOldF(f);
iter->setF({0., 0., 0.});
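    // Velocity-Verlet position update: x += v * deltaT + f * deltaT^2 / (2 m),
    // assembled below by scaling v and the current force f separately.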
v = autopas::utils::ArrayMath::mulScalar(v, deltaT);
f = autopas::utils::ArrayMath::mulScalar(f, (deltaT * deltaT / (2 * m)));
auto newR = autopas::utils::ArrayMath::add(v, f);
iter->addR(newR);
}
}
/**
 * Calculate and update the velocity for every particle using the Störmer-Verlet Algorithm.
 * @param autopas AutoPas container holding the particles to update.
 * @param particlePropertiesLibrary Library providing the mass of each particle type.
 * @param deltaT time step width
 */
template <class AutoPasTemplate, class ParticlePropertiesLibraryTemplate>
void calculateVelocities(AutoPasTemplate &autopas, const ParticlePropertiesLibraryTemplate &particlePropertiesLibrary,
const double deltaT) {
#ifdef AUTOPAS_OPENMP
#pragma omp parallel
#endif
for (auto iter = autopas.begin(autopas::IteratorBehavior::ownedOnly); iter.isValid(); ++iter) {
auto m = particlePropertiesLibrary.getMass(iter->getTypeId());
auto force = iter->getF();
auto old_force = iter->getOldf();
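    // Velocity-Verlet velocity update: v += (f + f_old) * deltaT / (2 m).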
auto newV =
autopas::utils::ArrayMath::mulScalar((autopas::utils::ArrayMath::add(force, old_force)), deltaT / (2 * m));
iter->addV(newV);
}
}
}  // namespace TimeDiscretization
|
ast-dump-openmp-target-teams-distribute-parallel-for.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) {
#pragma omp target teams distribute parallel for
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) {
#pragma omp target teams distribute parallel for
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) {
#pragma omp target teams distribute parallel for collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) {
#pragma omp target teams distribute parallel for collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) {
#pragma omp target teams distribute parallel for collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForDirective {{.*}} <line:4:1, col:49>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
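//
// What follows is the dump for test_two. Reconstructing from the source
// ranges recorded in the nodes below (the FunctionDecl at line 9, the
// directive at line 10, and the two loops at lines 11 and 12), the test
// input is presumably of this shape; a hedged sketch, kept in comments so
// it does not perturb the dumped translation unit:
//
//   void test_two(int x, int y) {
//   #pragma omp target teams distribute parallel for
//     for (int i = 0; i < x; i++)
//       for (int i = 0; i < y; i++)
//         ;
//   }
//
// Without a collapse clause only the outer loop is associated with the
// directive, so the inner ForStmt is dumped as the body of the outer one,
// and both loop bounds (x and y) are captured via the implicit
// firstprivate clause.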
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForDirective {{.*}} <line:10:1, col:49>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
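//
// What follows is the dump for test_three, the same nested-loop pattern
// with an explicit collapse clause. Judging from the locations below (the
// directive at line 17 spanning through column 61, with the clause
// argument at column 59), the input is presumably of this shape (again a
// hedged sketch, in comments only):
//
//   void test_three(int x, int y) {
//   #pragma omp target teams distribute parallel for collapse(1)
//     for (int i = 0; i < x; i++)
//       for (int i = 0; i < y; i++)
//         ;
//   }
//
// collapse(1) is dumped as an OMPCollapseClause wrapping a ConstantExpr
// that evaluates to Int 1; with a single collapsed loop the rest of the
// tree keeps the same shape as test_two's.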
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForDirective {{.*}} <line:17:1, col:61>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:50, col:60>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:59> 'int'
// CHECK-NEXT: | | |-value: Int 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:59> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
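// The subtree below is the autogenerated dump for test_four. A hedged
// reconstruction of the source it was generated from, inferred purely from
// the line/column references in the dump (lines 23-28 of this file):
//
//   void test_four(int x, int y) {
//   #pragma omp target teams distribute parallel for collapse(2)
//     for (int i = 0; i < x; i++)
//       for (int i = 0; i < y; i++)
//         ;
//   }
//
// The repeated CapturedStmt/CapturedDecl nesting appears to reflect the
// outlined regions Clang builds for each constituent construct of the
// combined directive (target, teams, distribute, parallel for), plus the
// target task (the CapturedDecl carrying the implicit .part_id.,
// .privates., .copy_fn., and .task_t. parameters).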
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForDirective {{.*}} <line:24:1, col:61>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:50, col:60>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:59> 'int'
// CHECK-NEXT: | | |-value: Int 2
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:59> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
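// Note: the implicit firstprivate clause above captures the two collapsed
// loop bounds, 'x' (referenced at line 25, col 23) and 'y' (line 26,
// col 25); the CapturedStmt chain below then re-captures them at each
// outlining level.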
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
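// The subtree below is the dump for test_five, which nests a third loop
// over 'z' under the same collapse(2) directive, so only the outer two
// loops are associated with the directive and the 'z' loop remains in the
// region body. A hedged reconstruction from the dump's line/column
// references (lines 30-36 of this file):
//
//   void test_five(int x, int y, int z) {
//   #pragma omp target teams distribute parallel for collapse(2)
//     for (int i = 0; i < x; i++)
//       for (int i = 0; i < y; i++)
//         for (int i = 0; i < z; i++)
//           ;
//   }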
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTargetTeamsDistributeParallelForDirective {{.*}} <line:31:1, col:61>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:50, col:60>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:59> 'int'
// CHECK-NEXT: | |-value: Int 2
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:59> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
matrix_bits.h | #ifndef MATRIX_BITS_H_
#define MATRIX_BITS_H_
#include <stdexcept>
namespace acspo {
template <typename T>
matrix<T> & operator&=(matrix<T> &mat1, const matrix<T> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int elem = mat1.elem();
T *m1ptr = mat1.ptr();
const T *m2ptr = mat2.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
m1ptr[i] &= m2ptr[i];
}
return mat1;
}
template <typename T>
matrix<T> & operator|=(matrix<T> &mat1, const matrix<T> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int elem = mat1.elem();
T *m1ptr = mat1.ptr();
const T *m2ptr = mat2.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
m1ptr[i] |= m2ptr[i];
}
return mat1;
}
template <typename T>
matrix<T> & operator^=(matrix<T> &mat1, const matrix<T> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int elem = mat1.elem();
T *m1ptr = mat1.ptr();
const T *m2ptr = mat2.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
m1ptr[i] ^= m2ptr[i];
}
return mat1;
}
template <typename T, typename S>
matrix<T> & operator&=(matrix<T> &mat, const S &val)
{
unsigned int elem = mat.elem();
T *mptr = mat.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
mptr[i] &= val;
}
return mat;
}
template <typename T, typename S>
matrix<T> & operator|=(matrix<T> &mat, const S &val)
{
unsigned int elem = mat.elem();
T *mptr = mat.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
mptr[i] |= val;
}
return mat;
}
template <typename T, typename S>
matrix<T> & operator^=(matrix<T> &mat, const S &val)
{
unsigned int elem = mat.elem();
T *mptr = mat.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
mptr[i] ^= val;
}
return mat;
}
template <typename T>
matrix<T> operator&(const matrix<T> &mat1, const matrix<T> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int elem = mat1.elem();
matrix<T> ret(mat1.size());
T *rptr = ret.ptr();
const T *m1ptr = mat1.ptr();
const T *m2ptr = mat2.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = m1ptr[i] & m2ptr[i];
}
return ret;
}
template <typename T>
matrix<T> operator|(const matrix<T> &mat1, const matrix<T> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int elem = mat1.elem();
matrix<T> ret(mat1.size());
T *rptr = ret.ptr();
const T *m1ptr = mat1.ptr();
const T *m2ptr = mat2.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = m1ptr[i] | m2ptr[i];
}
return ret;
}
template <typename T>
matrix<T> operator^(const matrix<T> &mat1, const matrix<T> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int elem = mat1.elem();
matrix<T> ret(mat1.size());
T *rptr = ret.ptr();
const T *m1ptr = mat1.ptr();
const T *m2ptr = mat2.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = m1ptr[i] ^ m2ptr[i];
}
return ret;
}
template <typename T, typename S>
matrix<T> operator&(const matrix<T> &mat, const S &val)
{
unsigned int elem = mat.elem();
matrix<T> ret(mat.size());
T *rptr = ret.ptr();
const T *mptr = mat.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = mptr[i] & val;
}
return ret;
}
template <typename T, typename S>
matrix<T> operator|(const matrix<T> &mat, const S &val)
{
unsigned int elem = mat.elem();
matrix<T> ret(mat.size());
T *rptr = ret.ptr();
const T *mptr = mat.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = mptr[i] | val;
}
return ret;
}
template <typename T, typename S>
matrix<T> operator^(const matrix<T> &mat, const S &val)
{
unsigned int elem = mat.elem();
matrix<T> ret(mat.size());
T *rptr = ret.ptr();
const T *mptr = mat.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = mptr[i] ^ val;
}
return ret;
}
template <typename T, typename S>
matrix<T> operator&(const S &val, const matrix<T> &mat)
{
return mat & val;
}
template <typename T, typename S>
matrix<T> operator|(const S &val, const matrix<T> &mat)
{
return mat | val;
}
template <typename T, typename S>
matrix<T> operator^(const S &val, const matrix<T> &mat)
{
return mat ^ val;
}
}
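/*
 * Usage sketch (hypothetical values; assumes only the matrix<T> interface
 * already used above, i.e. size(), elem() and ptr()):
 *   acspo::matrix<uint8_t> cloud = ..., land = ...;   // same dimensions
 *   cloud &= land;                       // element-wise AND in place
 *   cloud |= uint8_t(0x80);              // OR every element with a scalar
 *   auto masked = cloud & uint8_t(0x0F); // returns a new matrix
 */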
#endif
|
sequence2batch.h | #ifndef ANAKIN_SABER_FUNC_IMPL_X86_MATH_SEQUENCE_BATCH_H
#define ANAKIN_SABER_FUNC_IMPL_X86_MATH_SEQUENCE_BATCH_H
#include <algorithm>
#include <cstdlib>
#include <vector>
#include "saber/core/tensor.h"
#ifdef USE_OPENMP
#include "omp.h"
#endif
namespace anakin {
namespace saber {
namespace math {
template <DataType Dtype, typename LayOutType>
class CopyMatrixRowsFunctor {
public:
typedef Tensor<X86, Dtype, LayOutType> ioTensor;
typedef typename ioTensor::Dtype dtype;
// If is_src_index is true,
// copy the indexed rows of input src to the output dst.
// If is_src_index is false,
// copy the input src to the indexed rows of output dst.
// The indexed rows are based on the input index.
void operator()(ioTensor* src,
std::vector<int> index_lod, ioTensor* dst,
bool is_src_index, int fragment_num);
};
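// A minimal sketch of the is_src_index semantics above, with a hypothetical
// index_lod = {2, 0, 1}:
//   is_src_index == true  (gather):  dst row r = src row index_lod[r],
//       i.e. dst = {src[2], src[0], src[1]};
//   is_src_index == false (scatter): dst row index_lod[r] = src row r,
//       i.e. dst[2] = src[0], dst[0] = src[1], dst[1] = src[2].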
template <DataType Dtype, typename LayOutType>
class Seq2BatchFunctor {
// Calculate the length of each sequence and
// sort sequence index by the length.
// example: sequences = {s0, s1, s2}
// s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2
// seq_info[3] = {(4, 5, 1), (0, 4, 0), (9, 3, 2)}
//
struct SeqInfo {
SeqInfo(int start, int length, int seq_idx)
: start(start), length(length), seq_idx(seq_idx) {}
int start;
int length;
int seq_idx;
};
public:
typedef Tensor<X86, Dtype, LayOutType> ioTensor;
void operator()(ioTensor* seq,
ioTensor* batch, std::vector<std::vector<int>>& seq_to_batch_meta, bool is_cal_batch_lod,
bool is_reverse = false, int fragment_num = 1) const {
if (!is_cal_batch_lod) {
if (seq_to_batch_meta.size() < 2) {
LOG(ERROR) << "The size of seq_to_batch_meta should inlcude at least 2-level sequence information.";
exit(-1);
}
if (seq_to_batch_meta[1].size() != static_cast<int>(seq->num())) {
LOG(ERROR) << "The seq_to_batch information should be consistent with the dims.";
exit(-1);
}
CopyMatrixRowsFunctor<Dtype, LayOutType> to_batch;
to_batch(seq, seq_to_batch_meta[1], batch, true, fragment_num);
return;
}
if (seq_to_batch_meta.size() != 1) {
LOG(ERROR) << "Only support one level sequence now.";
exit(-1);
}
auto seq_meta = seq_to_batch_meta[0];
std::vector<SeqInfo> seq_info;
for (int seq_id = 0; seq_id < seq_meta.size() - 1; ++seq_id) {
int length = seq_meta[seq_id + 1] - seq_meta[seq_id];
seq_info.emplace_back(seq_meta[seq_id], length, seq_id);
//LOG(INFO) << "seq_meta[seq_id]:" << seq_meta[seq_id] << " length:" << length << " seq_id:" <<seq_id;
}
std::sort(seq_info.begin(), seq_info.end(),
[](SeqInfo a, SeqInfo b) {
return a.length > b.length;
});
// Calculate the start position of each batch.
// example: sequences = {s0, s1, s2}
// s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2
// num_batch = 5,
// batchIndex = {b0, b1, b2, b3, b4}
// b0: 1 0 2, b1: 1 0 2, b2: 1 0 2, b3: 1 0, b4: 1
// batch_start_positions[6] = {0, 3, 6, 9, 11, 12}
// batch_start_positions[0] = len(b0)
// batch_start_positions[1] = len(b0) + len(b1)
// batch_start_positions[2] = len(b0) + len(b1) + len(b2)
// ...
// seq2batch_idx[12] = {4, 0, 9,
// 5, 1, 10,
// 6, 2, 11,
// 7, 3,
// 8}
// seq_order = {1, 0, 2}, the sort order.
// where 1 is the second sequence,
// 0 is the first sequence,
// 2 is the third sequence.
// num_batch is the number of batches after rearranging the input
// LoDTensor; it equals the maximum sequence length of the input.
std::vector<std::vector<int>> batch_seq_meta;
batch_seq_meta.emplace_back(std::vector<int> {0});
batch_seq_meta.emplace_back(std::vector<int> {0});
batch_seq_meta.emplace_back(std::vector<int> {0});
// batch_seq_meta[0] is the start positions for batch LoDTensor
int num_batch = seq_info[0].length;
batch_seq_meta[0].resize(static_cast<int>(num_batch + 1));
// batch_seq_meta[1] is the raw index in the input LoDTensor
batch_seq_meta[1].resize(static_cast<int>(seq->num()));
// batch_seq_meta[2] is the sort order for the input LoDTensor.
batch_seq_meta[2].resize(seq_info.size());
int* batch_starts = batch_seq_meta[0].data();
int* seq2batch_idx = batch_seq_meta[1].data();
batch_starts[0] = 0;
for (int n = 0; n < num_batch; n++) {
auto batch_id = static_cast<int>(batch_starts[n]);
for (int i = 0; i < seq_info.size(); ++i) {
int seq_len = seq_info[i].length;
int start = seq_info[i].start;
if (n < seq_len) {
seq2batch_idx[batch_id] =
is_reverse ? start + seq_len - 1 - n : start + n;
batch_id++;
} else {
break;
}
}
batch_starts[n + 1] = static_cast<int>(batch_id);
}
int* seq_order = batch_seq_meta[2].data();
for (int i = 0; i < seq_info.size(); ++i) {
seq_order[i] = seq_info[i].seq_idx;
}
seq_to_batch_meta = batch_seq_meta;
CopyMatrixRowsFunctor<Dtype, LayOutType> to_batch;
to_batch(seq, batch_seq_meta[1], batch, true, fragment_num);
}
};
template <DataType Dtype, typename LayOutType>
class Batch2SeqFunctor {
public:
typedef Tensor<X86, Dtype, LayOutType> ioTensor;
void operator()(ioTensor* batch,
ioTensor* seq, std::vector<std::vector<int>>& seq_to_batch_meta, int fragment_num = 1) const {
if (seq_to_batch_meta.size() < 2) {
LOG(ERROR) << "The size of seq_to_batch_meta should inlcude at least 2-level sequence information.";
exit(-1);
}
if (seq_to_batch_meta[1].size() != static_cast<int>(seq->num())) {
LOG(ERROR) << "The seq_to_batch information should be consistent with the dims.";
exit(-1);
}
CopyMatrixRowsFunctor<Dtype, LayOutType> to_seq;
to_seq(batch, seq_to_batch_meta[1], seq, false, fragment_num);
}
};
template <DataType Dtype, typename LayOutType>
class ReorderInitState {
public:
typedef Tensor<X86, Dtype, LayOutType> ioTensor;
void operator()(ioTensor* src, std::vector<int> ind_lod, ioTensor* dst, bool indexed_src,
int fragment_num = 1) {
math::CopyMatrixRowsFunctor<Dtype, LayOutType> row_shuffle;
row_shuffle(src, ind_lod, dst, indexed_src, fragment_num);
}
};
/*
* This class can be used to convert the matrix structure of a sequence
* matrix into a batch structure.
* sequence matrix: [C1_s ... Cn_s | ...... | C1_t ... Cn_t]
* batch matrix: [C1_s ... C1_t | ...... | Cn_s ... Cn_t]
* Cn_s is the state for sequence s at time n.
*
* Example: sequence matrix = {{0, 0, 0, 0}, {1, 1, 1, 1, 1}, {2, 2, 2}}
* s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2
* batch matrix = {{1, 0, 2}, {1, 0, 2}, {1, 0, 2}, {1, 0}, {1}}
* b0: 1 0 2, b1: 1 0 2, b2: 1 0 2, b3: 1 0, b4: 1
*
* Use:
* Input: seqMatrix, seqStarts(Sequence Start Positions)
* Output: batchMatrix
* 1. SequenceToBatch seq2batch;
* 2. seq2batch.resizeOrCreateBatch(seqStarts); // calculate seq2BatchIdx
* 3. seq2batch.copy(seqMatrix, batchMatrix, true); // copy seq to batch matrix
*
*/
class SequenceToBatch {
public:
SequenceToBatch() {}
template <typename Dtype>
void seq_2_bat(const Dtype* input, Dtype* output, int word_size) {
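// Gather into batch order: output row old_id is copied from input row
// seq2BatchIdx_[old_id], word_size values per row.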
int word_sum = seq2BatchIdx_.size();
#pragma omp parallel for if(thread_num > 1)
for (int old_id = 0; old_id < word_sum; ++old_id) {
int word_start = old_id * word_size;
int maped_id = seq2BatchIdx_[old_id];
int maped_start = maped_id * word_size;
for (int word_offset = 0; word_offset < word_size; ++word_offset) {
output[word_start + word_offset] = input[maped_start + word_offset];
}
}
}
template <typename Dtype>
void hidden_2_bat(const Dtype* input, Dtype* output, int hidden_size) {
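// Reorder hidden states into the sorted sequence order: output row old_id
// is copied from input row seqStartAndLength_[old_id].seqIdx_.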
int batch_size = seqStartAndLength_.size();
for (int old_id = 0; old_id < batch_size; ++old_id) {
int word_start = old_id * hidden_size;
int maped_id = seqStartAndLength_[old_id].seqIdx_;
int maped_start = maped_id * hidden_size;
for (int word_offset = 0; word_offset < hidden_size; ++word_offset) {
output[word_start + word_offset] = input[maped_start + word_offset];
}
}
}
template <typename Dtype>
void bat_2_seq(const Dtype* input, Dtype* output, int hidden_size) {
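// Scatter back to sequence order: input row old_id (batch order) is
// written to output row seq2BatchIdx_[old_id].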
int word_sum = seq2BatchIdx_.size();
#pragma omp parallel for if(thread_num > 1)
for (int old_id = 0; old_id < word_sum; old_id++) {
int word_start = old_id * hidden_size;
int maped_id = seq2BatchIdx_[old_id];
int maped_start = maped_id * hidden_size;
for (int word_offset = 0; word_offset < hidden_size; word_offset++) {
output[maped_start + word_offset] = input[word_start + word_offset];
}
}
}
template <typename Dtype>
void bat_2_seq(const Dtype* input, Dtype* output, int hidden_size, int aligned_hidden_size) {
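// Same scatter as above, but the input rows are padded: each input row is
// read with stride aligned_hidden_size while only its first hidden_size
// values are written to the packed output. For example (hypothetical
// sizes), hidden_size = 3 with aligned_hidden_size = 4 skips one pad
// value per input row.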
int word_sum = seq2BatchIdx_.size();
#pragma omp parallel for if(thread_num > 1)
for (int old_id = 0; old_id < word_sum; old_id++) {
int word_start = old_id * aligned_hidden_size;
int maped_id = seq2BatchIdx_[old_id];
int maped_start = maped_id * hidden_size;
for (int word_offset = 0; word_offset < hidden_size; word_offset++) {
output[maped_start + word_offset] = input[word_start + word_offset];
}
}
}
void get_batch_offset(std::vector<int>& bat_offset) {
for (size_t i = 0; i < batchStartPositions_.size(); i++) {
bat_offset[i] = batchStartPositions_[i];
}
}
size_t get_batch_num() const {
return numBatch_;
}
void create_batch(int batchSize, size_t numSequences, std::vector<int>& seqStarts,
bool reversed) {
CHECK_EQ(seqStarts[numSequences], batchSize);
seq2BatchIdx_.resize(batchSize);
/*
* calculate the length of each sequence & sort sequence index by the length
* Example: Sequences = {s0, s1, s2}
* s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2
* seqStartAndLength_[3] = {(4, 5, 1), (0, 4, 0), (9, 3, 2)}
*/
for (size_t seqId = 0; seqId < numSequences; ++seqId) {
int length = seqStarts[seqId + 1] - seqStarts[seqId];
seqStartAndLength_.emplace_back(seqStarts[seqId], length, seqId);
}
std::sort(seqStartAndLength_.begin(), seqStartAndLength_.end(),
[](SeqStartAndLength a, SeqStartAndLength b) {
return a.length_ > b.length_;
});
/*
* calculate the start position of each batch
* (numBatch equal the maxLength of sequences)
* Example: Sequences = {s0, s1, s2}
* s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2
* numBatch = 5,
* batchIndex = {b0, b1, b2, b3, b4}
* b0: 1 0 2, b1: 1 0 2, b2: 1 0 2, b3: 1 0, b4: 1
* batchStartPositions[6] = {0, 3, 6, 9, 11, 12}
*/
numBatch_ = (size_t)seqStartAndLength_[0].length_;
batchStartPositions_.resize(numBatch_ + 1);
batchStartPositions_[0] = 0;
for (size_t n = 0; n < numBatch_; n++) {
int batchId = batchStartPositions_[n];
for (size_t i = 0; i < seqStartAndLength_.size(); ++i) {
size_t seqLength = seqStartAndLength_[i].length_;
int start = seqStartAndLength_[i].start_;
if (n < seqLength) {
if (!reversed) {
seq2BatchIdx_[batchId] = start + n;
} else {
seq2BatchIdx_[batchId] = start + seqLength - 1 - n;
}
batchId++;
} else {
break;
}
}
batchStartPositions_[n + 1] = batchId;
}
}
protected:
struct SeqStartAndLength {
int start_;
int length_;
int seqIdx_;
SeqStartAndLength(int start, int length, int seqIdx)
: start_(start), length_(length), seqIdx_(seqIdx) {}
};
std::vector<SeqStartAndLength> seqStartAndLength_;
std::vector<int> batchStartPositions_;
std::vector<int> seq2BatchIdx_;
size_t numBatch_;
#ifdef USE_OPENMP
int thread_num = omp_get_max_threads();
#endif
};
} // namespace math
} // namespace saber
} // namespace anakin
#endif
|
GB_binop__le_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_int16)
// A*D function (colscale): GB (_AxD__le_int16)
// D*A function (rowscale): GB (_DxB__le_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__le_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__le_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_int16)
// C=scalar+B GB (_bind1st__le_int16)
// C=scalar+B' GB (_bind1st_tran__le_int16)
// C=A+scalar GB (_bind2nd__le_int16)
// C=A'+scalar GB (_bind2nd_tran__le_int16)
// C type: bool
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
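// For example, x = 3 and y = 7 (both int16_t) give z = true.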
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_INT16 || GxB_NO_LE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__le_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__le_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__le_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__le_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__le_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__le_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__le_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__le_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__le_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__le_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__le_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__le_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
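//------------------------------------------------------------------------------
// note on bind1st/bind2nd (illustrative sketch, not generated code)
//------------------------------------------------------------------------------
// The two kernels above back GrB_apply with a binary operator and a bound
// scalar: bind1st computes Cx [p] = (x <= Bx [p]) and bind2nd computes
// Cx [p] = (Ax [p] <= y).  A minimal user-level sketch, assuming the
// standard GrB_Matrix_apply_BinaryOp2nd_INT16 signature:
//
//      // C(i,j) = (A(i,j) <= 5) for every entry in the pattern of A
//      GrB_Matrix_apply_BinaryOp2nd_INT16 (C, NULL, NULL,
//          GrB_LE_INT16, A, (int16_t) 5, NULL) ;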
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__le_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__le_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
morn_image_geometry.c | /*
Copyright (C) 2019-2020 JingWeiZhangHuai <jingweizhanghuai@163.com>
Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <math.h>
#include "morn_image.h"
void ImagePolygonBorder(MArray *border,int height,int width,MList *polygon)
{
mException(INVALID_POINTER(polygon),EXIT,"invalid input polygon");
mException((polygon->num<3),EXIT,"invalid input polygon");
mException(INVALID_POINTER(border),EXIT,"invalid input");
mArrayRedefine(border,height*2,sizeof(short),border->dataS16);
MImagePoint **point = (MImagePoint **)(polygon->data);
char *str;
str=&(border->info.name[0][0]);if(strlen(str)>0) mException((strcmp(str,"y1" )!=0),EXIT,"invalid border");else strcpy(str,"y1");
str=&(border->info.name[1][0]);if(strlen(str)>0) mException((strcmp(str,"y2" )!=0),EXIT,"invalid border");else strcpy(str,"y2");
str=&(border->info.name[2][0]);if(strlen(str)>0) mException((strcmp(str,"height")!=0),EXIT,"invalid border");else strcpy(str,"height");
str=&(border->info.name[3][0]);if(strlen(str)>0) mException((strcmp(str,"width" )!=0),EXIT,"invalid border");else strcpy(str,"width");
int y1 = (int)(point[0]->y+0.5f);int y2 = (int)(point[0]->y+0.5f);
for(int n=1;n<polygon->num;n++)
{y1 = MIN(y1,(int)(point[n]->y+0.5f));y2 = MAX(y2,(int)(point[n]->y+0.5f));}
y1 = MAX(y1,0);y2 = MIN(y2+1,height);
border->info.value[0]=y1;border->info.value[1]=y2;border->info.value[2]=height;border->info.value[3]=width;
memset(border->dataS16,0,border->num*sizeof(short));
for(int n=0;n<polygon->num;n++)
{
MImagePoint *p1,*p2;
int lx,ly;
p1 = point[n];
if(n+1<polygon->num)
p2 = point[n+1];
else
p2 = point[0];
if(p1->y==p2->y)
{
ly=(int)(p1->y + 0.5f);
if((ly>=0)&&(ly<height))
{
if(p1->x<p2->x)
{
border->dataS16[ly+ly ] = p1->x;
border->dataS16[ly+ly+1] = p2->x;
}
else
{
border->dataS16[ly+ly ] = p2->x;
border->dataS16[ly+ly+1] = p1->x;
}
}
continue;
}
float x_locate = (p1->x);
float step = (p2->x - p1->x)/(p2->y - p1->y);
step = (p1->y>p2->y)?(0.0f-step):step;
for(ly=(int)(p1->y+0.5f);;ly=((p1->y>p2->y)?(ly-1):(ly+1)))
{
lx = (int)(x_locate+0.5f);
if(lx<0) lx=0; else if(lx>width) lx=width;
if((ly>=0)&&(ly<height))
{
if( border->dataS16[ly+ly] == 0)
border->dataS16[ly+ly] = lx;
else if(lx<border->dataS16[ly+ly])
{
border->dataS16[ly+ly+1] = border->dataS16[ly+ly];
border->dataS16[ly+ly ] = lx;
}
else if(lx>border->dataS16[ly+ly+1])
border->dataS16[ly+ly+1] = lx;
}
if(ly==(int)(p2->y+0.5f))
break;
x_locate = x_locate + step;
}
}
}
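// ImagePolygonBorder records, for every image row ly in [y1,y2), the leftmost
// and rightmost polygon crossings in border->dataS16[ly+ly] and
// border->dataS16[ly+ly+1].  A minimal scanline-fill sketch built on that
// layout (the channel-0 image indexing img->data[0][y][x] is an assumption):
//
//      int y1=(int)(border->info.value[0]);
//      int y2=(int)(border->info.value[1]);
//      for(int y=y1;y<y2;y++)
//          for(int x=border->dataS16[y+y];x<border->dataS16[y+y+1];x++)
//              img->data[0][y][x]=255;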
// void _ImagePolygonBorder(MArray *border,int height,int width,int num,float x0,float y0,float x1,float y1,float x2,float y2,float x3,float y3,float x4,float y4,float x5,float y5,float x6,float y6,float x7,float y7)
// {
// mException(INVALID_POINTER(border),EXIT,"invalid input");
// mException((num<3),EXIT,"invalid input");
// float px[8]={x0,x1,x2,x3,x4,x5,x6,x7};
// float py[8]={y0,y1,y2,y3,y4,y5,y6,y7};
// MList *polygon = mListCreate(DFLT,NULL);
// mListPlace(polygon,NULL,num,sizeof(MImagePoint));
// MImagePoint **point = (MImagePoint **)(polygon->data);
// for(int i=0;i<num;i++)
// {
// point[i]->x = px[i];
// point[i]->y = py[i];
// }
// // printf("point[0]->x is %f,point[0]->y is %f\n",point[0]->x,point[0]->y);
// // printf("point[1]->x is %f,point[1]->y is %f\n",point[1]->x,point[1]->y);
// // printf("point[2]->x is %f,point[2]->y is %f\n",point[2]->x,point[2]->y);
// // printf("point[3]->x is %f,point[3]->y is %f\n",point[3]->x,point[3]->y);
// ImagePolygonBorder(border,height,width,polygon);
// mListRelease(polygon);
// }
void mImageRectBorder(MArray *border,int height,int width,int x1,int x2,int y1,int y2)
{
mException(INVALID_POINTER(border),EXIT,"invalid input");
mArrayRedefine(border,height*2,sizeof(short),border->dataS16);
int buff;
if(x1>x2) {buff=x1;x1=x2;x2=buff;} x1 = MAX(0,x1); x2 = MIN(width ,x2);
if(y1>y2) {buff=y1;y1=y2;y2=buff;} y1 = MAX(0,y1); y2 = MIN(height,y2);
char *str;
str=&(border->info.name[0][0]);if(strlen(str)>0) mException((strcmp(str,"y1" )!=0),EXIT,"invalid border");else strcpy(str,"y1");
str=&(border->info.name[1][0]);if(strlen(str)>0) mException((strcmp(str,"y2" )!=0),EXIT,"invalid border");else strcpy(str,"y2");
str=&(border->info.name[2][0]);if(strlen(str)>0) mException((strcmp(str,"height")!=0),EXIT,"invalid border");else strcpy(str,"height");
str=&(border->info.name[3][0]);if(strlen(str)>0) mException((strcmp(str,"width" )!=0),EXIT,"invalid border");else strcpy(str,"width");
border->info.value[0]=y1;border->info.value[1]=y2;border->info.value[2]=height;border->info.value[3]=width;
memset(border->dataS16,0,(y1+y1)*sizeof(short));
for(int i=y1;i<y2;i++)
{
border->dataS16[i+i ] = x1;
border->dataS16[i+i+1] = x2;
}
memset(border->dataS16+y2+y2,0,(height-y2)*2*sizeof(short));
}
void mLine(MList *line,float x1,float y1,float x2,float y2)
{
mException(INVALID_POINTER(line),EXIT,"invalid input");
mListPlace(line,NULL,2,sizeof(MImagePoint));
MImagePoint **point = (MImagePoint **)(line->data);
point[0]->x = x1;
point[0]->y = y1;
point[1]->x = x2;
point[1]->y = y2;
}
void mTriangle(MList *triangle,float x1,float y1,float x2,float y2,float x3,float y3)
{
mException(INVALID_POINTER(triangle),EXIT,"invalid input");
mListPlace(triangle,NULL,3,sizeof(MImagePoint));
MImagePoint **point = (MImagePoint **)(triangle->data);
point[0]->x = x1;
point[0]->y = y1;
point[1]->x = x2;
point[1]->y = y2;
point[2]->x = x3;
point[2]->y = y3;
}
void mQuadrangle(MList *quadrangle,float x1,float y1,float x2,float y2,float x3,float y3,float x4,float y4)
{
mException(INVALID_POINTER(quadrangle),EXIT,"invalid input");
mListPlace(quadrangle,NULL,4,sizeof(MImagePoint));
MImagePoint **point = (MImagePoint **)(quadrangle->data);
point[0]->x = x1;
point[0]->y = y1;
point[1]->x = x2;
point[1]->y = y2;
point[2]->x = x3;
point[2]->y = y3;
point[3]->x = x4;
point[3]->y = y4;
}
void mLineTravel(MImagePoint *p1,MImagePoint *p2,int stride,void (*func)(MImagePoint *,void *),void *para)
{
int i;float step;int num;
float x_min,x_max,y_min,y_max;
// printf("p1 is %f,%f,p2 is %f,%f\n",p1->x,p1->y,p2->x,p2->y);
if(ABS(p1->x-p2->x)>ABS(p1->y-p2->y))
{
if(p1->x==p2->x) return;
if(p1->x<p2->x){x_min=p1->x;x_max=p2->x;y_min=p1->y;y_max=p2->y;}
else {x_min=p2->x;x_max=p1->x;y_min=p2->y;y_max=p1->y;}
step = (p1->y-p2->y)/(p1->x-p2->x);step = step*stride;
num = (int)((x_max-x_min)/stride+0.5);num=MAX(num,1);
// #pragma omp parallel for
for(i=0;i<num;i++)
{
MImagePoint point;
point.x = x_min + i*stride;
point.y = y_min + i*step;
func(&point,para);
}
}
else
{
if(p1->y==p2->y) return;
if(p1->y<p2->y){x_min=p1->x;x_max=p2->x;y_min=p1->y;y_max=p2->y;}
else {x_min=p2->x;x_max=p1->x;y_min=p2->y;y_max=p1->y;}
step = (p1->x-p2->x)/(p1->y-p2->y);step = step*stride;
num = (int)((y_max-y_min)/stride+0.5);num=MAX(num,1);
// #pragma omp parallel for
for(i=0;i<num;i++)
{
MImagePoint point;
point.x = x_min + i*step;
point.y = y_min + i*stride;
func(&point,para);
}
}
}
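// mLineTravel samples the segment p1->p2 every 'stride' pixels along its
// dominant axis and passes each sample to 'func'.  A minimal usage sketch
// (print_point is a hypothetical callback; 'para' is unused here):
//
//      void print_point(MImagePoint *p,void *para)
//      {printf("(%.1f,%.1f)\n",p->x,p->y);}
//
//      MImagePoint a={.x=0.0f,.y=0.0f},b={.x=10.0f,.y=4.0f};
//      mLineTravel(&a,&b,1,print_point,NULL);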
void mPolygonSideTravel(MList *polygon,int stride,void (*func)(MImagePoint *,void *),void *para)
{
int i;
for(i=0;i<polygon->num-1;i++)
mLineTravel((MImagePoint *)(polygon->data[i]),(MImagePoint *)(polygon->data[i+1]),stride,func,para);
mLineTravel((MImagePoint *)(polygon->data[i]),(MImagePoint *)(polygon->data[0]),stride,func,para);
}
void mCurveTravel(MImageCurve *curve,int stride,void (*func)(MImagePoint *,void *),void *para)
{
int min,max;
if(curve->type<=0)
{
if(curve->v1.x<curve->v2.x) {min=curve->v1.x;max=curve->v2.x;}
else {min=curve->v2.x;max=curve->v1.x;}
for(int i=min;i<max;i+=stride)
{
MImagePoint p1;p1.x=i ;p1.y=curve->curve(p1.x,curve->para);
MImagePoint p2;p2.x=i+1;p2.y=curve->curve(p2.x,curve->para);
mLineTravel(&p1,&p2,stride,func,para);
}
}
else
{
if(curve->v1.y<curve->v2.y) {min=curve->v1.y;max=curve->v2.y;}
else {min=curve->v2.y;max=curve->v1.y;}
for(int i=min;i<max;i+=stride)
{
MImagePoint p1;p1.y=i ;p1.x=curve->curve(p1.y,curve->para);
MImagePoint p2;p2.y=i+1;p2.x=curve->curve(p2.y,curve->para);
mLineTravel(&p1,&p2,stride,func,para);
}
}
}
// void mCurveToPolyLine(MImageCurve *curve,MList *line)
// {
// MImage pt;
// if(curve->type<=0) {for(int i=curve->v1.x;i<=curve->v2.x;i++) {pt.x=i;pt.y=curve->curve(pt.x,curve->para);mListWrite(line,
// for(int
// }
/*
void mPolygon(MList *polygon,int num,...)
{
mException(INVALID_POINTER(polygon),EXIT,"invalid input");
if(num<0)
num = polygon->num;
mException((num<1),EXIT,"invalid input");
mListPlace(polygon,NULL,num,sizeof(MImagePoint));
MImagePoint **point = (MImagePoint **)(polygon->data);
va_list para;
va_start(para,num);
for(int i=0;i<num;i++)
{
point[i]->x = (float)va_arg(para,int);
point[i]->y = (float)va_arg(para,int);
}
va_end(para);
}*/
void _PolygonSetup(MList *polygon,int num,float x0,float y0,float x1,float y1,float x2,float y2,float x3,float y3,float x4,float y4,float x5,float y5,float x6,float y6,float x7,float y7)
{
mException(INVALID_POINTER(polygon),EXIT,"invalid input");
mException((num<=0)||(num>8),EXIT,"invalid input");
float px[8]={x0,x1,x2,x3,x4,x5,x6,x7};
float py[8]={y0,y1,y2,y3,y4,y5,y6,y7};
mListClear(polygon);
for(int i=0;i<num;i++)
{
MImagePoint point;point.x = px[i];point.y = py[i];
mListWrite(polygon,DFLT,&point,sizeof(MImagePoint));
}
// append the first vertex again so that data[num] closes the ring, then
// hide the duplicate by shrinking num back
mListWrite(polygon,DFLT,polygon->data[0],sizeof(MImagePoint));
polygon->num=polygon->num-1;
}
void mCurve(MImageCurve *curve,float i1,float i2,int type,float (*func)(float,float *),float *para)
{
curve->type = type;
curve->curve= func;
if(para!=NULL) memcpy(curve->para,para,16*sizeof(float));
// printf("i1 is %f,i2 is %f\n",i1,i2);
if(i2<i1) {float buff=i1;i1=i2;i2=buff;}
if(type<=0)
{
curve->v1.x=i1;curve->v1.y=func(i1,para);
curve->v2.x=i2;curve->v2.y=func(i2,para);
}
else
{
curve->v1.y=i1;curve->v1.x=func(i1,para);
curve->v2.y=i2;curve->v2.x=func(i2,para);
}
}
float mCurvePoint(MImageCurve *curve,float x)
{
return (curve->curve)(x,curve->para);
}
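// mCurve captures y=f(x) (type<=0) or x=f(y) (type>0) over [i1,i2], and
// mCurveTravel then walks it as a chain of unit-length chords.  A minimal
// sketch tracing a parabola, reusing print_point from the sketch above
// (parabola and its 16-float coefficient array are hypothetical):
//
//      float parabola(float x,float *para) {return para[0]*x*x;}
//
//      float coeff[16]={0.01f};
//      MImageCurve curve;
//      mCurve(&curve,0.0f,100.0f,0,parabola,coeff);
//      mCurveTravel(&curve,1,print_point,NULL);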
void mPoissonDiskPoint(MList *list,float r,float x1,float x2,float y1,float y2)
{
mException(list==NULL,EXIT,"invalid input");
float grid_size = 0.707106781*r; // r/sqrt(2): each grid cell holds at most one accepted sample
float r2 = r*r;
int gw = ceil((x2-x1)/grid_size);int gh = ceil((y2-y1)/grid_size);
MImagePoint **grid = (MImagePoint **)mMalloc(gh*sizeof(MImagePoint *));
for(int j=0;j<gh;j++) {grid[j]=(MImagePoint *)mMalloc(gw*sizeof(MImagePoint));memset(grid[j],0,gw*sizeof(MImagePoint));}
//printf("grid_size is %f,gw is %d,gh is %d\n",grid_size,gw,gh);
mListClear(list);
MImagePoint point;point.x=(x1+x2)/2;point.y=(y1+y2)/2;
mListWrite(list,DFLT,&point,sizeof(MImagePoint));
int x=floor(point.x/grid_size);int y=floor(point.y/grid_size);grid[y][x]=point;
for(int n=0;n<list->num;n++) // list->num grows as new samples are accepted
{
MImagePoint *p = (MImagePoint *)(list->data[n]);
int num = 0;
while(1)
{
num+=1;if(num>=64) break;
float d = (float)mRand(r*10000,r*15000)/10000.0f;
float a = (float)mRand(0,360000)/1000.0f;
float dx = d*mSin(a);point.x = p->x+dx; if((point.x>=x2)||(point.x<x1)) continue;
float dy = d*mCos(a);point.y = p->y+dy; if((point.y>=y2)||(point.y<y1)) continue;
x=floor(point.x/grid_size); y=floor(point.y/grid_size);
#define DISTANCE(P1,P2) ((P1.x-P2.x)*(P1.x-P2.x)+(P1.y-P2.y)*(P1.y-P2.y))
if((grid[y  ][x  ].x!=0)||(grid[y  ][x  ].y!=0)) { continue;}
// check every grid cell that can hold a point closer than r: the 5x5
// neighborhood minus its four corners (corner cells are at least r away)
if((y-2>=0)&&(x-1>=0)) if((grid[y-2][x-1].x!=0)||(grid[y-2][x-1].y!=0)) {if(DISTANCE(grid[y-2][x-1],point)<r2) continue;}
if((y-2>=0)          ) if((grid[y-2][x  ].x!=0)||(grid[y-2][x  ].y!=0)) {if(DISTANCE(grid[y-2][x  ],point)<r2) continue;}
if((y-2>=0)&&(x+1<gw)) if((grid[y-2][x+1].x!=0)||(grid[y-2][x+1].y!=0)) {if(DISTANCE(grid[y-2][x+1],point)<r2) continue;}
if((y-1>=0)&&(x-2>=0)) if((grid[y-1][x-2].x!=0)||(grid[y-1][x-2].y!=0)) {if(DISTANCE(grid[y-1][x-2],point)<r2) continue;}
if((y-1>=0)&&(x-1>=0)) if((grid[y-1][x-1].x!=0)||(grid[y-1][x-1].y!=0)) {if(DISTANCE(grid[y-1][x-1],point)<r2) continue;}
if((y-1>=0)          ) if((grid[y-1][x  ].x!=0)||(grid[y-1][x  ].y!=0)) {if(DISTANCE(grid[y-1][x  ],point)<r2) continue;}
if((y-1>=0)&&(x+1<gw)) if((grid[y-1][x+1].x!=0)||(grid[y-1][x+1].y!=0)) {if(DISTANCE(grid[y-1][x+1],point)<r2) continue;}
if((y-1>=0)&&(x+2<gw)) if((grid[y-1][x+2].x!=0)||(grid[y-1][x+2].y!=0)) {if(DISTANCE(grid[y-1][x+2],point)<r2) continue;}
if(           (x-2>=0)) if((grid[y  ][x-2].x!=0)||(grid[y  ][x-2].y!=0)) {if(DISTANCE(grid[y  ][x-2],point)<r2) continue;}
if(           (x-1>=0)) if((grid[y  ][x-1].x!=0)||(grid[y  ][x-1].y!=0)) {if(DISTANCE(grid[y  ][x-1],point)<r2) continue;}
if(           (x+1<gw)) if((grid[y  ][x+1].x!=0)||(grid[y  ][x+1].y!=0)) {if(DISTANCE(grid[y  ][x+1],point)<r2) continue;}
if(           (x+2<gw)) if((grid[y  ][x+2].x!=0)||(grid[y  ][x+2].y!=0)) {if(DISTANCE(grid[y  ][x+2],point)<r2) continue;}
if((y+1<gh)&&(x-2>=0)) if((grid[y+1][x-2].x!=0)||(grid[y+1][x-2].y!=0)) {if(DISTANCE(grid[y+1][x-2],point)<r2) continue;}
if((y+1<gh)&&(x-1>=0)) if((grid[y+1][x-1].x!=0)||(grid[y+1][x-1].y!=0)) {if(DISTANCE(grid[y+1][x-1],point)<r2) continue;}
if((y+1<gh)          ) if((grid[y+1][x  ].x!=0)||(grid[y+1][x  ].y!=0)) {if(DISTANCE(grid[y+1][x  ],point)<r2) continue;}
if((y+1<gh)&&(x+1<gw)) if((grid[y+1][x+1].x!=0)||(grid[y+1][x+1].y!=0)) {if(DISTANCE(grid[y+1][x+1],point)<r2) continue;}
if((y+1<gh)&&(x+2<gw)) if((grid[y+1][x+2].x!=0)||(grid[y+1][x+2].y!=0)) {if(DISTANCE(grid[y+1][x+2],point)<r2) continue;}
if((y+2<gh)&&(x-1>=0)) if((grid[y+2][x-1].x!=0)||(grid[y+2][x-1].y!=0)) {if(DISTANCE(grid[y+2][x-1],point)<r2) continue;}
if((y+2<gh)          ) if((grid[y+2][x  ].x!=0)||(grid[y+2][x  ].y!=0)) {if(DISTANCE(grid[y+2][x  ],point)<r2) continue;}
if((y+2<gh)&&(x+1<gw)) if((grid[y+2][x+1].x!=0)||(grid[y+2][x+1].y!=0)) {if(DISTANCE(grid[y+2][x+1],point)<r2) continue;}
num=0;
mListWrite(list,DFLT,&point,sizeof(MImagePoint));
grid[y][x]=point;
}
}
for(int j=0;j<gh;j++) mFree(grid[j]);
mFree(grid);
}
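// mPoissonDiskPoint generates blue-noise samples: accepted points are at
// least r apart, with candidates drawn in the annulus [r,1.5r] around
// existing points (up to 64 consecutive rejections per point).  A minimal
// usage sketch:
//
//      MList *samples = mListCreate(DFLT,NULL);
//      mPoissonDiskPoint(samples,8.0f,0.0f,640.0f,0.0f,480.0f);
//      for(int i=0;i<samples->num;i++)
//      {
//          MImagePoint *p=(MImagePoint *)(samples->data[i]);
//          printf("%f,%f\n",p->x,p->y);
//      }
//      mListRelease(samples);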
/*
void mPolygonDeleteVertex(MPolygon *src,MPolygon *dst,int locate)
{
int i;
mException(INVALID_POINTER(src),"invalid input",EXIT);
mException((locate >= src->n),"invalid input",EXIT);
if(INVALID_POINTER(dst)||(dst==src))
{
src->n = src->n-1;
for(i=locate;i<src->n;i++)
src->vertex[i] = src->vertex[i+1];
}
else
{
if(dst->n <src->n-1)
{
if(!INVALID_POINTER(dst->vertex))
mFree(dst->vertex);
dst->vertex = (MImagePoint *)mMalloc((src->n-1)*sizeof(MImagePoint));
}
dst->n = src->n -1;
memcpy(dst->vertex,src->vertex,locate*sizeof(MImagePoint));
memcpy(dst->vertex+locate,src->vertex+locate+1,(dst->n-locate)*sizeof(MImagePoint));
}
}
void mPolygonAddVertex(MPolygon *src,MPolygon *dst,int locate,int x,int y)
{
MImagePoint *point;
mException(INVALID_POINTER(src),"invalid input",EXIT);
mException((locate > src->n),"invalid input",EXIT);
if(dst->n > src->n+1)
point = dst->vertex;
else
point = (MImagePoint *)mMalloc((src->n+1)*sizeof(MImagePoint));
memcpy(point,src->vertex,locate*sizeof(MImagePoint));
point[locate].x = x;
point[locate].y = y;
memcpy(point+locate+1,(src->vertex)+locate,(src->n -locate)*sizeof(MImagePoint));
dst->n = src->n+1;
if((!INVALID_POINTER(dst->vertex))&&(point != dst->vertex))
mFree(src->vertex);
dst->vertex = point;
}
*/ |
kmp_sch_simd_guided.c | // RUN: %libomp-compile-and-run
/*
Test for the 'schedule(simd:guided)' clause.
The compiler needs to generate dynamic dispatching and pass the schedule
value 46 to the OpenMP RTL. The test uses numerous loop parameter combinations.
*/
#include <stdio.h>
#include <omp.h>
#if defined(WIN32) || defined(_WIN32)
#include <windows.h>
#define delay() Sleep(1);
#else
#include <unistd.h>
#define delay() usleep(10);
#endif
// uncomment for debug diagnostics:
//#define DEBUG 1
#define SIMD_LEN 4
// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL
enum sched {
kmp_sch_static_balanced_chunked = 45,
kmp_sch_guided_simd = 46,
kmp_sch_runtime_simd = 47,
};
typedef unsigned u32;
typedef long long i64;
typedef unsigned long long u64;
typedef struct {
int reserved_1;
int flags;
int reserved_2;
int reserved_3;
char *psource;
} id;
extern int __kmpc_global_thread_num(id*);
extern void __kmpc_barrier(id*, int gtid);
extern void __kmpc_dispatch_init_4(id*, int, enum sched, int, int, int, int);
extern void __kmpc_dispatch_init_8(id*, int, enum sched, i64, i64, i64, i64);
extern int __kmpc_dispatch_next_4(id*, int, void*, void*, void*, void*);
extern int __kmpc_dispatch_next_8(id*, int, void*, void*, void*, void*);
// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------
static id loc = {0, 2, 0, 0, ";file;func;0;0;;"};
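// ---------------------------------------------------------------------------
// The dispatch_init/dispatch_next calls below emulate what a compiler emits
// for a worksharing loop with the simd schedule modifier.  A source-level
// equivalent of one tested configuration might look like this (illustrative
// sketch only; work() is hypothetical and the test drives the RTL directly):
//
//      #pragma omp parallel for simd schedule(simd:guided, SIMD_LEN)
//      for (i64 i = loop_lb; i <= loop_ub; i += loop_st)
//          work(i);
//
// which the compiler lowers to kmp_sch_guided_simd (46) in dispatch_init.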
// ---------------------------------------------------------------------------
int run_loop_64(i64 loop_lb, i64 loop_ub, i64 loop_st, int loop_chunk) {
int err = 0;
static int volatile loop_sync = 0;
i64 lb; // Chunk lower bound
i64 ub; // Chunk upper bound
i64 st; // Chunk stride
int rc;
int tid = omp_get_thread_num();
int gtid = tid;
int last;
#if DEBUG
printf("run_loop_<%d>(lb=%d, ub=%d, st=%d, ch=%d)\n",
(int)sizeof(i64), gtid, tid,
(int)loop_lb, (int)loop_ub, (int)loop_st, loop_chunk);
#endif
// Don't test degenerate cases that should have been discovered by codegen
if (loop_st == 0)
return 0;
if (loop_st > 0 ? loop_lb > loop_ub : loop_lb < loop_ub)
return 0;
__kmpc_dispatch_init_8(&loc, gtid, kmp_sch_guided_simd,
loop_lb, loop_ub, loop_st, loop_chunk);
if (tid == 0) {
// Let the master thread handle the chunks alone
int chunk; // Number of the current chunk
i64 next_lb; // Lower bound of the next chunk
i64 last_ub; // Upper bound of the last processed chunk
u64 cur; // Number of iterations in current chunk
u64 max; // Max allowed iterations for current chunk
int undersized = 0;
chunk = 0;
next_lb = loop_lb;
max = (loop_ub - loop_lb) / loop_st + 1;
// The first chunk can consume all iterations
while (__kmpc_dispatch_next_8(&loc, gtid, &last, &lb, &ub, &st)) {
++ chunk;
#if DEBUG
printf("chunk=%d, lb=%d, ub=%d\n", chunk, (int)lb, (int)ub);
#endif
// Only the final chunk may be undersized; an undersized earlier chunk is an error
if (undersized) {
printf("Error with chunk %d\n", chunk);
err++;
}
// Check lower and upper bounds
if (lb != next_lb) {
printf("Error with lb %d, %d, ch %d\n", (int)lb, (int)next_lb, chunk);
err++;
}
if (loop_st > 0) {
if (!(ub <= loop_ub)) {
printf("Error with ub %d, %d, ch %d\n", (int)ub, (int)loop_ub, chunk);
err++;
}
if (!(lb <= ub)) {
printf("Error with bounds %d, %d, %d\n", (int)lb, (int)ub, chunk);
err++;
}
} else {
if (!(ub >= loop_ub)) {
printf("Error with ub %d, %d, %d\n", (int)ub, (int)loop_ub, chunk);
err++;
}
if (!(lb >= ub)) {
printf("Error with bounds %d, %d, %d\n", (int)lb, (int)ub, chunk);
err++;
}
}; // if
// Stride should not change
if (!(st == loop_st)) {
printf("Error with st %d, %d, ch %d\n", (int)st, (int)loop_st, chunk);
err++;
}
cur = (ub - lb) / loop_st + 1;
// Guided scheduling uses FP computations, so the current chunk may
// be slightly larger (+1) than the allowed maximum
if (!(cur <= max + 1)) {
printf("Error with iter %llu, %llu\n", cur, max);
err++;
}
// Update maximum for the next chunk
if (cur < max)
max = cur;
next_lb = ub + loop_st;
last_ub = ub;
undersized = (cur < loop_chunk);
}; // while
// Must have at least one chunk
if (!(chunk > 0)) {
printf("Error with chunk %d\n", chunk);
err++;
}
// Must have the right last iteration index
if (loop_st > 0) {
if (!(last_ub <= loop_ub)) {
printf("Error with last1 %d, %d, ch %d\n",
(int)last_ub, (int)loop_ub, chunk);
err++;
}
if (!(last_ub + loop_st > loop_ub)) {
printf("Error with last2 %d, %d, %d, ch %d\n",
(int)last_ub, (int)loop_st, (int)loop_ub, chunk);
err++;
}
} else {
if (!(last_ub >= loop_ub)) {
printf("Error with last1 %d, %d, ch %d\n",
(int)last_ub, (int)loop_ub, chunk);
err++;
}
if (!(last_ub + loop_st < loop_ub)) {
printf("Error with last2 %d, %d, %d, ch %d\n",
(int)last_ub, (int)loop_st, (int)loop_ub, chunk);
err++;
}
}; // if
// Let non-master threads go
loop_sync = 1;
} else {
int i;
// Workers wait for master thread to finish, then call __kmpc_dispatch_next
for (i = 0; i < 1000000; ++ i) {
if (loop_sync != 0) {
break;
}; // if
}; // for i
while (loop_sync == 0) {
delay();
}; // while
// At this point no chunks remain -- all of them have already been
// processed by the master thread
rc = __kmpc_dispatch_next_8(&loc, gtid, &last, &lb, &ub, &st);
if (rc) {
printf("Error return value\n");
err++;
}
}; // if
__kmpc_barrier(&loc, gtid);
if (tid == 0) {
loop_sync = 0; // Restore original state
#if DEBUG
printf("run_loop_64(): at the end\n");
#endif
}; // if
__kmpc_barrier(&loc, gtid);
return err;
} // run_loop_64
// ---------------------------------------------------------------------------
int run_loop_32(int loop_lb, int loop_ub, int loop_st, int loop_chunk) {
int err = 0;
static int volatile loop_sync = 0;
int lb; // Chunk lower bound
int ub; // Chunk upper bound
int st; // Chunk stride
int rc;
int tid = omp_get_thread_num();
int gtid = tid;
int last;
#if DEBUG
printf("run_loop_<%d>(lb=%d, ub=%d, st=%d, ch=%d)\n",
(int)sizeof(int), gtid, tid,
(int)loop_lb, (int)loop_ub, (int)loop_st, loop_chunk);
#endif
// Don't test degenerate cases that should have been discovered by codegen
if (loop_st == 0)
return 0;
if (loop_st > 0 ? loop_lb > loop_ub : loop_lb < loop_ub)
return 0;
__kmpc_dispatch_init_4(&loc, gtid, kmp_sch_guided_simd,
loop_lb, loop_ub, loop_st, loop_chunk);
if (tid == 0) {
// Let the master thread handle the chunks alone
int chunk; // Number of the current chunk
int next_lb; // Lower bound of the next chunk
int last_ub; // Upper bound of the last processed chunk
u64 cur; // Number of iterations in current chunk
u64 max; // Max allowed iterations for current chunk
int undersized = 0;
chunk = 0;
next_lb = loop_lb;
max = (loop_ub - loop_lb) / loop_st + 1;
// The first chunk can consume all iterations
while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st)) {
++ chunk;
#if DEBUG
printf("chunk=%d, lb=%d, ub=%d\n", chunk, (int)lb, (int)ub);
#endif
// Only the final chunk may be undersized; an undersized earlier chunk is an error
if (undersized) {
printf("Error with chunk %d\n", chunk);
err++;
}
// Check lower and upper bounds
if (lb != next_lb) {
printf("Error with lb %d, %d, ch %d\n", (int)lb, (int)next_lb, chunk);
err++;
}
if (loop_st > 0) {
if (!(ub <= loop_ub)) {
printf("Error with ub %d, %d, ch %d\n", (int)ub, (int)loop_ub, chunk);
err++;
}
if (!(lb <= ub)) {
printf("Error with bounds %d, %d, %d\n", (int)lb, (int)ub, chunk);
err++;
}
} else {
if (!(ub >= loop_ub)) {
printf("Error with ub %d, %d, %d\n", (int)ub, (int)loop_ub, chunk);
err++;
}
if (!(lb >= ub)) {
printf("Error with bounds %d, %d, %d\n", (int)lb, (int)ub, chunk);
err++;
}
}; // if
// Stride should not change
if (!(st == loop_st)) {
printf("Error with st %d, %d, ch %d\n", (int)st, (int)loop_st, chunk);
err++;
}
cur = (ub - lb) / loop_st + 1;
// Guided scheduling uses FP computations, so the current chunk may
// be slightly larger (+1) than the allowed maximum
if (!(cur <= max + 1)) {
printf("Error with iter %llu, %llu\n", cur, max);
err++;
}
// Update maximum for the next chunk
if (cur < max)
max = cur;
next_lb = ub + loop_st;
last_ub = ub;
undersized = (cur < loop_chunk);
}; // while
// Must have at least one chunk
if (!(chunk > 0)) {
printf("Error with chunk %d\n", chunk);
err++;
}
// Must have the right last iteration index
if (loop_st > 0) {
if (!(last_ub <= loop_ub)) {
printf("Error with last1 %d, %d, ch %d\n",
(int)last_ub, (int)loop_ub, chunk);
err++;
}
if (!(last_ub + loop_st > loop_ub)) {
printf("Error with last2 %d, %d, %d, ch %d\n",
(int)last_ub, (int)loop_st, (int)loop_ub, chunk);
err++;
}
} else {
if (!(last_ub >= loop_ub)) {
printf("Error with last1 %d, %d, ch %d\n",
(int)last_ub, (int)loop_ub, chunk);
err++;
}
if (!(last_ub + loop_st < loop_ub)) {
printf("Error with last2 %d, %d, %d, ch %d\n",
(int)last_ub, (int)loop_st, (int)loop_ub, chunk);
err++;
}
}; // if
// Let non-master threads go
loop_sync = 1;
} else {
int i;
// Workers wait for master thread to finish, then call __kmpc_dispatch_next
for (i = 0; i < 1000000; ++ i) {
if (loop_sync != 0) {
break;
}; // if
}; // for i
while (loop_sync == 0) {
delay();
}; // while
// At this point no chunks remain -- all of them have already been
// processed by the master thread
rc = __kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st);
if (rc) {
printf("Error return value\n");
err++;
}
}; // if
__kmpc_barrier(&loc, gtid);
if (tid == 0) {
loop_sync = 0; // Restore original state
#if DEBUG
printf("run_loop<>(): at the end\n");
#endif
}; // if
__kmpc_barrier(&loc, gtid);
return err;
} // run_loop_32
// ---------------------------------------------------------------------------
int run_64(int num_th)
{
int err = 0;
#pragma omp parallel num_threads(num_th)
{
int chunk;
i64 st, lb, ub;
for (chunk = SIMD_LEN; chunk <= 3*SIMD_LEN; chunk += SIMD_LEN) {
for (st = 1; st <= 3; ++ st) {
for (lb = -3 * num_th * st; lb <= 3 * num_th * st; ++ lb) {
for (ub = lb; ub < lb + num_th * (chunk+1) * st; ++ ub) {
err += run_loop_64(lb, ub, st, chunk);
err += run_loop_64(ub, lb, -st, chunk);
}; // for ub
}; // for lb
}; // for st
}; // for chunk
}
return err;
} // run_64
int run_32(int num_th)
{
int err = 0;
#pragma omp parallel num_threads(num_th)
{
int chunk, st, lb, ub;
for (chunk = SIMD_LEN; chunk <= 3*SIMD_LEN; chunk += SIMD_LEN) {
for (st = 1; st <= 3; ++ st) {
for (lb = -3 * num_th * st; lb <= 3 * num_th * st; ++ lb) {
for (ub = lb; ub < lb + num_th * (chunk+1) * st; ++ ub) {
err += run_loop_32(lb, ub, st, chunk);
err += run_loop_32(ub, lb, -st, chunk);
}; // for ub
}; // for lb
}; // for st
}; // for chunk
}
return err;
} // run_32
// ---------------------------------------------------------------------------
int main()
{
int n, err = 0;
for (n = 1; n <= 4; ++ n) {
err += run_32(n);
err += run_64(n);
}; // for n
if (err)
printf("failed with %d errors\n", err);
else
printf("passed\n");
return err;
}
|
omp.h | /*
* include/omp.h.var
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef __OMP_H
# define __OMP_H
# include <stdlib.h>
# include <stdint.h>
# define KMP_VERSION_MAJOR 5
# define KMP_VERSION_MINOR 0
# define KMP_VERSION_BUILD 20140926
# define KMP_BUILD_DATE "No_Timestamp"
# ifdef __cplusplus
extern "C" {
# endif
# define omp_set_affinity_format ompc_set_affinity_format
# define omp_get_affinity_format ompc_get_affinity_format
# define omp_display_affinity ompc_display_affinity
# define omp_capture_affinity ompc_capture_affinity
# if defined(_WIN32)
# define __KAI_KMPC_CONVENTION __cdecl
# ifndef __KMP_IMP
# define __KMP_IMP __declspec(dllimport)
# endif
# else
# define __KAI_KMPC_CONVENTION
# ifndef __KMP_IMP
# define __KMP_IMP
# endif
# endif
/* schedule kind constants */
typedef enum omp_sched_t {
omp_sched_static = 1,
omp_sched_dynamic = 2,
omp_sched_guided = 3,
omp_sched_auto = 4,
omp_sched_monotonic = 0x80000000
} omp_sched_t;
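/* omp_sched_monotonic is a flag bit (OpenMP 5.0) that may be OR-ed with one
   of the schedule kinds above.  A minimal usage sketch, to be paired with a
   loop that uses schedule(runtime):

       omp_set_schedule((omp_sched_t)(omp_sched_guided | omp_sched_monotonic), 4);
       omp_sched_t kind; int chunk;
       omp_get_schedule(&kind, &chunk);
*/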
/* set API functions */
extern void __KAI_KMPC_CONVENTION omp_set_num_threads (int);
extern void __KAI_KMPC_CONVENTION omp_set_dynamic (int);
extern void __KAI_KMPC_CONVENTION omp_set_nested (int);
extern void __KAI_KMPC_CONVENTION omp_set_max_active_levels (int);
extern void __KAI_KMPC_CONVENTION omp_set_schedule (omp_sched_t, int);
/* query API functions */
extern int __KAI_KMPC_CONVENTION omp_get_num_threads (void);
extern int __KAI_KMPC_CONVENTION omp_get_dynamic (void);
extern int __KAI_KMPC_CONVENTION omp_get_nested (void);
extern int __KAI_KMPC_CONVENTION omp_get_max_threads (void);
extern int __KAI_KMPC_CONVENTION omp_get_thread_num (void);
extern int __KAI_KMPC_CONVENTION omp_get_num_procs (void);
extern int __KAI_KMPC_CONVENTION omp_in_parallel (void);
extern int __KAI_KMPC_CONVENTION omp_in_final (void);
extern int __KAI_KMPC_CONVENTION omp_get_active_level (void);
extern int __KAI_KMPC_CONVENTION omp_get_level (void);
extern int __KAI_KMPC_CONVENTION omp_get_ancestor_thread_num (int);
extern int __KAI_KMPC_CONVENTION omp_get_team_size (int);
extern int __KAI_KMPC_CONVENTION omp_get_thread_limit (void);
extern int __KAI_KMPC_CONVENTION omp_get_max_active_levels (void);
extern void __KAI_KMPC_CONVENTION omp_get_schedule (omp_sched_t *, int *);
extern int __KAI_KMPC_CONVENTION omp_get_max_task_priority (void);
/* lock API functions */
typedef struct omp_lock_t {
void * _lk;
} omp_lock_t;
extern void __KAI_KMPC_CONVENTION omp_init_lock (omp_lock_t *);
extern void __KAI_KMPC_CONVENTION omp_set_lock (omp_lock_t *);
extern void __KAI_KMPC_CONVENTION omp_unset_lock (omp_lock_t *);
extern void __KAI_KMPC_CONVENTION omp_destroy_lock (omp_lock_t *);
extern int __KAI_KMPC_CONVENTION omp_test_lock (omp_lock_t *);
/* nested lock API functions */
typedef struct omp_nest_lock_t {
void * _lk;
} omp_nest_lock_t;
extern void __KAI_KMPC_CONVENTION omp_init_nest_lock (omp_nest_lock_t *);
extern void __KAI_KMPC_CONVENTION omp_set_nest_lock (omp_nest_lock_t *);
extern void __KAI_KMPC_CONVENTION omp_unset_nest_lock (omp_nest_lock_t *);
extern void __KAI_KMPC_CONVENTION omp_destroy_nest_lock (omp_nest_lock_t *);
extern int __KAI_KMPC_CONVENTION omp_test_nest_lock (omp_nest_lock_t *);
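/* A minimal usage sketch for the plain lock API above: the lock serializes
   updates to a shared counter inside a parallel region (counter is a
   hypothetical shared variable):

       omp_lock_t lck;
       omp_init_lock(&lck);
       #pragma omp parallel
       {
           omp_set_lock(&lck);
           counter++;              // critical section
           omp_unset_lock(&lck);
       }
       omp_destroy_lock(&lck);
*/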
/* OpenMP 5.0 Synchronization hints */
typedef enum omp_sync_hint_t {
omp_sync_hint_none = 0,
omp_lock_hint_none = omp_sync_hint_none,
omp_sync_hint_uncontended = 1,
omp_lock_hint_uncontended = omp_sync_hint_uncontended,
omp_sync_hint_contended = (1<<1),
omp_lock_hint_contended = omp_sync_hint_contended,
omp_sync_hint_nonspeculative = (1<<2),
omp_lock_hint_nonspeculative = omp_sync_hint_nonspeculative,
omp_sync_hint_speculative = (1<<3),
omp_lock_hint_speculative = omp_sync_hint_speculative,
kmp_lock_hint_hle = (1<<16),
kmp_lock_hint_rtm = (1<<17),
kmp_lock_hint_adaptive = (1<<18)
} omp_sync_hint_t;
/* lock hint type for dynamic user lock */
typedef omp_sync_hint_t omp_lock_hint_t;
/* hinted lock initializers */
extern void __KAI_KMPC_CONVENTION omp_init_lock_with_hint(omp_lock_t *, omp_lock_hint_t);
extern void __KAI_KMPC_CONVENTION omp_init_nest_lock_with_hint(omp_nest_lock_t *, omp_lock_hint_t);
/* time API functions */
extern double __KAI_KMPC_CONVENTION omp_get_wtime (void);
extern double __KAI_KMPC_CONVENTION omp_get_wtick (void);
/* OpenMP 4.0 */
extern int __KAI_KMPC_CONVENTION omp_get_default_device (void);
extern void __KAI_KMPC_CONVENTION omp_set_default_device (int);
extern int __KAI_KMPC_CONVENTION omp_is_initial_device (void);
extern int __KAI_KMPC_CONVENTION omp_get_num_devices (void);
extern int __KAI_KMPC_CONVENTION omp_get_num_teams (void);
extern int __KAI_KMPC_CONVENTION omp_get_team_num (void);
extern int __KAI_KMPC_CONVENTION omp_get_cancellation (void);
/* OpenMP 4.5 */
extern int __KAI_KMPC_CONVENTION omp_get_initial_device (void);
extern void* __KAI_KMPC_CONVENTION omp_target_alloc(size_t, int);
extern void __KAI_KMPC_CONVENTION omp_target_free(void *, int);
extern int __KAI_KMPC_CONVENTION omp_target_is_present(const void *, int);
extern int __KAI_KMPC_CONVENTION omp_target_memcpy(void *, const void *, size_t, size_t, size_t, int, int);
extern int __KAI_KMPC_CONVENTION omp_target_memcpy_rect(void *, const void *, size_t, int, const size_t *,
const size_t *, const size_t *, const size_t *, const size_t *, int, int);
extern int __KAI_KMPC_CONVENTION omp_target_associate_ptr(const void *, const void *, size_t, size_t, int);
extern int __KAI_KMPC_CONVENTION omp_target_disassociate_ptr(const void *, int);
/* OpenMP 5.0 */
extern int __KAI_KMPC_CONVENTION omp_get_device_num (void);
typedef void * omp_depend_t;
/* OpenMP 5.1 interop */
typedef intptr_t omp_intptr_t;
/* 0..omp_get_num_interop_properties()-1 are reserved for implementation-defined properties */
typedef enum omp_interop_property {
omp_ipr_fr_id = -1,
omp_ipr_fr_name = -2,
omp_ipr_vendor = -3,
omp_ipr_vendor_name = -4,
omp_ipr_device_num = -5,
omp_ipr_platform = -6,
omp_ipr_device = -7,
omp_ipr_device_context = -8,
omp_ipr_targetsync = -9,
omp_ipr_first = -9
} omp_interop_property_t;
#define omp_interop_none 0
typedef enum omp_interop_rc {
omp_irc_no_value = 1,
omp_irc_success = 0,
omp_irc_empty = -1,
omp_irc_out_of_range = -2,
omp_irc_type_int = -3,
omp_irc_type_ptr = -4,
omp_irc_type_str = -5,
omp_irc_other = -6
} omp_interop_rc_t;
typedef enum omp_interop_fr {
omp_ifr_cuda = 1,
omp_ifr_cuda_driver = 2,
omp_ifr_opencl = 3,
omp_ifr_sycl = 4,
omp_ifr_hip = 5,
omp_ifr_level_zero = 6,
omp_ifr_last = 7
} omp_interop_fr_t;
typedef void * omp_interop_t;
/*!
* The `omp_get_num_interop_properties` routine retrieves the number of implementation-defined properties available for an `omp_interop_t` object.
*/
extern int __KAI_KMPC_CONVENTION omp_get_num_interop_properties(const omp_interop_t);
/*!
* The `omp_get_interop_int` routine retrieves an integer property from an `omp_interop_t` object.
*/
extern omp_intptr_t __KAI_KMPC_CONVENTION omp_get_interop_int(const omp_interop_t, omp_interop_property_t, int *);
/*!
* The `omp_get_interop_ptr` routine retrieves a pointer property from an `omp_interop_t` object.
*/
extern void * __KAI_KMPC_CONVENTION omp_get_interop_ptr(const omp_interop_t, omp_interop_property_t, int *);
/*!
* The `omp_get_interop_str` routine retrieves a string property from an `omp_interop_t` object.
*/
extern const char * __KAI_KMPC_CONVENTION omp_get_interop_str(const omp_interop_t, omp_interop_property_t, int *);
/*!
* The `omp_get_interop_name` routine retrieves a property name from an `omp_interop_t` object.
*/
extern const char * __KAI_KMPC_CONVENTION omp_get_interop_name(const omp_interop_t, omp_interop_property_t);
/*!
* The `omp_get_interop_type_desc` routine retrieves a description of the type of a property associated with an `omp_interop_t` object.
*/
extern const char * __KAI_KMPC_CONVENTION omp_get_interop_type_desc(const omp_interop_t, omp_interop_property_t);
/*!
* The `omp_get_interop_rc_desc` routine retrieves a description of the return code associated with an `omp_interop_t` object.
*/
extern const char * __KAI_KMPC_CONVENTION omp_get_interop_rc_desc(const omp_interop_t, omp_interop_rc_t);
/* OpenMP 5.1 device memory routines */
/*!
* The `omp_target_memcpy_async` routine asynchronously performs a copy between any combination of host and device pointers.
*/
extern int __KAI_KMPC_CONVENTION omp_target_memcpy_async(void *, const void *, size_t, size_t, size_t, int,
int, int, omp_depend_t *);
/*!
* The `omp_target_memcpy_rect_async` routine asynchronously performs a copy between any combination of host and device pointers.
*/
extern int __KAI_KMPC_CONVENTION omp_target_memcpy_rect_async(void *, const void *, size_t, int, const size_t *,
const size_t *, const size_t *, const size_t *, const size_t *, int, int,
int, omp_depend_t *);
/*!
* The `omp_get_mapped_ptr` routine returns the device pointer that is associated with a host pointer for a given device.
*/
extern void * __KAI_KMPC_CONVENTION omp_get_mapped_ptr(const void *, int);
extern int __KAI_KMPC_CONVENTION omp_target_is_accessible(const void *, size_t, int);
/* kmp API functions */
extern int __KAI_KMPC_CONVENTION kmp_get_stacksize (void);
extern void __KAI_KMPC_CONVENTION kmp_set_stacksize (int);
extern size_t __KAI_KMPC_CONVENTION kmp_get_stacksize_s (void);
extern void __KAI_KMPC_CONVENTION kmp_set_stacksize_s (size_t);
extern int __KAI_KMPC_CONVENTION kmp_get_blocktime (void);
extern int __KAI_KMPC_CONVENTION kmp_get_library (void);
extern void __KAI_KMPC_CONVENTION kmp_set_blocktime (int);
extern void __KAI_KMPC_CONVENTION kmp_set_library (int);
extern void __KAI_KMPC_CONVENTION kmp_set_library_serial (void);
extern void __KAI_KMPC_CONVENTION kmp_set_library_turnaround (void);
extern void __KAI_KMPC_CONVENTION kmp_set_library_throughput (void);
extern void __KAI_KMPC_CONVENTION kmp_set_defaults (char const *);
extern void __KAI_KMPC_CONVENTION kmp_set_disp_num_buffers (int);
/* Intel affinity API */
typedef void * kmp_affinity_mask_t;
extern int __KAI_KMPC_CONVENTION kmp_set_affinity (kmp_affinity_mask_t *);
extern int __KAI_KMPC_CONVENTION kmp_get_affinity (kmp_affinity_mask_t *);
extern int __KAI_KMPC_CONVENTION kmp_get_affinity_max_proc (void);
extern void __KAI_KMPC_CONVENTION kmp_create_affinity_mask (kmp_affinity_mask_t *);
extern void __KAI_KMPC_CONVENTION kmp_destroy_affinity_mask (kmp_affinity_mask_t *);
extern int __KAI_KMPC_CONVENTION kmp_set_affinity_mask_proc (int, kmp_affinity_mask_t *);
extern int __KAI_KMPC_CONVENTION kmp_unset_affinity_mask_proc (int, kmp_affinity_mask_t *);
extern int __KAI_KMPC_CONVENTION kmp_get_affinity_mask_proc (int, kmp_affinity_mask_t *);
/* OpenMP 4.0 affinity API */
typedef enum omp_proc_bind_t {
omp_proc_bind_false = 0,
omp_proc_bind_true = 1,
omp_proc_bind_master = 2,
omp_proc_bind_close = 3,
omp_proc_bind_spread = 4
} omp_proc_bind_t;
extern omp_proc_bind_t __KAI_KMPC_CONVENTION omp_get_proc_bind (void);
/* OpenMP 4.5 affinity API */
extern int __KAI_KMPC_CONVENTION omp_get_num_places (void);
extern int __KAI_KMPC_CONVENTION omp_get_place_num_procs (int);
extern void __KAI_KMPC_CONVENTION omp_get_place_proc_ids (int, int *);
extern int __KAI_KMPC_CONVENTION omp_get_place_num (void);
extern int __KAI_KMPC_CONVENTION omp_get_partition_num_places (void);
extern void __KAI_KMPC_CONVENTION omp_get_partition_place_nums (int *);
extern void * __KAI_KMPC_CONVENTION kmp_malloc (size_t);
extern void * __KAI_KMPC_CONVENTION kmp_aligned_malloc (size_t, size_t);
extern void * __KAI_KMPC_CONVENTION kmp_calloc (size_t, size_t);
extern void * __KAI_KMPC_CONVENTION kmp_realloc (void *, size_t);
extern void __KAI_KMPC_CONVENTION kmp_free (void *);
extern void __KAI_KMPC_CONVENTION kmp_set_warnings_on(void);
extern void __KAI_KMPC_CONVENTION kmp_set_warnings_off(void);
/* OpenMP 5.0 Tool Control */
typedef enum omp_control_tool_result_t {
omp_control_tool_notool = -2,
omp_control_tool_nocallback = -1,
omp_control_tool_success = 0,
omp_control_tool_ignored = 1
} omp_control_tool_result_t;
typedef enum omp_control_tool_t {
omp_control_tool_start = 1,
omp_control_tool_pause = 2,
omp_control_tool_flush = 3,
omp_control_tool_end = 4
} omp_control_tool_t;
extern int __KAI_KMPC_CONVENTION omp_control_tool(int, int, void*);
/* OpenMP 5.0 Memory Management */
typedef uintptr_t omp_uintptr_t;
typedef enum {
omp_atk_sync_hint = 1,
omp_atk_alignment = 2,
omp_atk_access = 3,
omp_atk_pool_size = 4,
omp_atk_fallback = 5,
omp_atk_fb_data = 6,
omp_atk_pinned = 7,
omp_atk_partition = 8
} omp_alloctrait_key_t;
typedef enum {
omp_atv_false = 0,
omp_atv_true = 1,
omp_atv_contended = 3,
omp_atv_uncontended = 4,
omp_atv_serialized = 5,
omp_atv_sequential = omp_atv_serialized, // (deprecated)
omp_atv_private = 6,
omp_atv_all = 7,
omp_atv_thread = 8,
omp_atv_pteam = 9,
omp_atv_cgroup = 10,
omp_atv_default_mem_fb = 11,
omp_atv_null_fb = 12,
omp_atv_abort_fb = 13,
omp_atv_allocator_fb = 14,
omp_atv_environment = 15,
omp_atv_nearest = 16,
omp_atv_blocked = 17,
omp_atv_interleaved = 18
} omp_alloctrait_value_t;
#define omp_atv_default ((omp_uintptr_t)-1)
typedef struct {
omp_alloctrait_key_t key;
omp_uintptr_t value;
} omp_alloctrait_t;
# if defined(_WIN32)
// On Windows, cl and icl do not support 64-bit enums, so an integer type is used instead.
typedef omp_uintptr_t omp_allocator_handle_t;
extern __KMP_IMP omp_allocator_handle_t const omp_null_allocator;
extern __KMP_IMP omp_allocator_handle_t const omp_default_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const omp_large_cap_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const omp_const_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const omp_high_bw_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const omp_low_lat_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const omp_cgroup_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const omp_pteam_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const omp_thread_mem_alloc;
/* Preview of target memory support */
extern __KMP_IMP omp_allocator_handle_t const llvm_omp_target_host_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const llvm_omp_target_shared_mem_alloc;
extern __KMP_IMP omp_allocator_handle_t const llvm_omp_target_device_mem_alloc;
typedef omp_uintptr_t omp_memspace_handle_t;
extern __KMP_IMP omp_memspace_handle_t const omp_default_mem_space;
extern __KMP_IMP omp_memspace_handle_t const omp_large_cap_mem_space;
extern __KMP_IMP omp_memspace_handle_t const omp_const_mem_space;
extern __KMP_IMP omp_memspace_handle_t const omp_high_bw_mem_space;
extern __KMP_IMP omp_memspace_handle_t const omp_low_lat_mem_space;
/* Preview of target memory support */
extern __KMP_IMP omp_memspace_handle_t const llvm_omp_target_host_mem_space;
extern __KMP_IMP omp_memspace_handle_t const llvm_omp_target_shared_mem_space;
extern __KMP_IMP omp_memspace_handle_t const llvm_omp_target_device_mem_space;
# else
# if __cplusplus >= 201103
typedef enum omp_allocator_handle_t : omp_uintptr_t
# else
typedef enum omp_allocator_handle_t
# endif
{
omp_null_allocator = 0,
omp_default_mem_alloc = 1,
omp_large_cap_mem_alloc = 2,
omp_const_mem_alloc = 3,
omp_high_bw_mem_alloc = 4,
omp_low_lat_mem_alloc = 5,
omp_cgroup_mem_alloc = 6,
omp_pteam_mem_alloc = 7,
omp_thread_mem_alloc = 8,
/* Preview of target memory support */
llvm_omp_target_host_mem_alloc = 100,
llvm_omp_target_shared_mem_alloc = 101,
llvm_omp_target_device_mem_alloc = 102,
KMP_ALLOCATOR_MAX_HANDLE = UINTPTR_MAX
} omp_allocator_handle_t;
# if __cplusplus >= 201103
typedef enum omp_memspace_handle_t : omp_uintptr_t
# else
typedef enum omp_memspace_handle_t
# endif
{
omp_default_mem_space = 0,
omp_large_cap_mem_space = 1,
omp_const_mem_space = 2,
omp_high_bw_mem_space = 3,
omp_low_lat_mem_space = 4,
/* Preview of target memory support */
llvm_omp_target_host_mem_space = 100,
llvm_omp_target_shared_mem_space = 101,
llvm_omp_target_device_mem_space = 102,
KMP_MEMSPACE_MAX_HANDLE = UINTPTR_MAX
} omp_memspace_handle_t;
# endif
extern omp_allocator_handle_t __KAI_KMPC_CONVENTION omp_init_allocator(omp_memspace_handle_t m,
int ntraits, omp_alloctrait_t traits[]);
extern void __KAI_KMPC_CONVENTION omp_destroy_allocator(omp_allocator_handle_t allocator);
extern void __KAI_KMPC_CONVENTION omp_set_default_allocator(omp_allocator_handle_t a);
extern omp_allocator_handle_t __KAI_KMPC_CONVENTION omp_get_default_allocator(void);
# ifdef __cplusplus
extern void *__KAI_KMPC_CONVENTION omp_alloc(size_t size, omp_allocator_handle_t a = omp_null_allocator);
extern void *__KAI_KMPC_CONVENTION omp_calloc(size_t nmemb, size_t size, omp_allocator_handle_t a = omp_null_allocator);
extern void *__KAI_KMPC_CONVENTION omp_realloc(void *ptr, size_t size,
omp_allocator_handle_t allocator = omp_null_allocator,
omp_allocator_handle_t free_allocator = omp_null_allocator);
extern void __KAI_KMPC_CONVENTION omp_free(void * ptr, omp_allocator_handle_t a = omp_null_allocator);
# else
extern void *__KAI_KMPC_CONVENTION omp_alloc(size_t size, omp_allocator_handle_t a);
extern void *__KAI_KMPC_CONVENTION omp_calloc(size_t nmemb, size_t size, omp_allocator_handle_t a);
extern void *__KAI_KMPC_CONVENTION omp_realloc(void *ptr, size_t size, omp_allocator_handle_t allocator,
omp_allocator_handle_t free_allocator);
extern void __KAI_KMPC_CONVENTION omp_free(void *ptr, omp_allocator_handle_t a);
# endif
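/* A minimal usage sketch for the OpenMP 5.0 memory-management API declared
   above: build a 64-byte-aligned allocator on the default memory space,
   allocate through it, and tear it down (the buffer size is illustrative):

       omp_alloctrait_t traits[1] = {{omp_atk_alignment, 64}};
       omp_allocator_handle_t a =
           omp_init_allocator(omp_default_mem_space, 1, traits);
       double *buf = (double *) omp_alloc(1024 * sizeof(double), a);
       omp_free(buf, a);
       omp_destroy_allocator(a);
*/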
/* OpenMP 5.0 Affinity Format */
extern void __KAI_KMPC_CONVENTION omp_set_affinity_format(char const *);
extern size_t __KAI_KMPC_CONVENTION omp_get_affinity_format(char *, size_t);
extern void __KAI_KMPC_CONVENTION omp_display_affinity(char const *);
extern size_t __KAI_KMPC_CONVENTION omp_capture_affinity(char *, size_t, char const *);
/* OpenMP 5.0 events */
# if defined(_WIN32)
// On Windows, cl and icl do not support 64-bit enums, so an integer type is used instead.
typedef omp_uintptr_t omp_event_handle_t;
# else
typedef enum omp_event_handle_t { KMP_EVENT_MAX_HANDLE = UINTPTR_MAX } omp_event_handle_t;
# endif
extern void __KAI_KMPC_CONVENTION omp_fulfill_event ( omp_event_handle_t event );
/* OpenMP 5.0 Pause Resources */
typedef enum omp_pause_resource_t {
omp_pause_resume = 0,
omp_pause_soft = 1,
omp_pause_hard = 2
} omp_pause_resource_t;
extern int __KAI_KMPC_CONVENTION omp_pause_resource(omp_pause_resource_t, int);
extern int __KAI_KMPC_CONVENTION omp_pause_resource_all(omp_pause_resource_t);
extern int __KAI_KMPC_CONVENTION omp_get_supported_active_levels(void);
/* OpenMP 5.1 */
extern void __KAI_KMPC_CONVENTION omp_set_num_teams(int num_teams);
extern int __KAI_KMPC_CONVENTION omp_get_max_teams(void);
extern void __KAI_KMPC_CONVENTION omp_set_teams_thread_limit(int limit);
extern int __KAI_KMPC_CONVENTION omp_get_teams_thread_limit(void);
/* OpenMP 5.1 Display Environment */
extern void __KAI_KMPC_CONVENTION omp_display_env(int verbose);
# if defined(_OPENMP) && _OPENMP >= 201811
#pragma omp begin declare variant match(device={kind(host)})
static inline int omp_is_initial_device(void) { return 1; }
#pragma omp end declare variant
#pragma omp begin declare variant match(device={kind(nohost)})
static inline int omp_is_initial_device(void) { return 0; }
#pragma omp end declare variant
# endif
# undef __KAI_KMPC_CONVENTION
# undef __KMP_IMP
/* Warning:
The following typedefs are non-standard and deprecated; they will be removed in a future release.
*/
typedef int omp_int_t;
typedef double omp_wtime_t;
# ifdef __cplusplus
}
# endif
#endif /* __OMP_H */
|
GB_binop__lor_bool.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_bool)
// A.*B function (eWiseMult): GB (_AemultB_08__lor_bool)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_bool)
// A.*B function (eWiseMult): GB (_AemultB_04__lor_bool)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_bool)
// A*D function (colscale): GB (_AxD__lor_bool)
// D*A function (rowscale): GB (_DxB__lor_bool)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_bool)
// C=scalar+B GB (_bind1st__lor_bool)
// C=scalar+B' GB (_bind1st_tran__lor_bool)
// C=A+scalar GB (_bind2nd__lor_bool)
// C=A'+scalar GB (_bind2nd_tran__lor_bool)
// C type: bool
// A type: bool
// A pattern? 0
// B type: bool
// B pattern? 0
// BinaryOp: cij = (aij || bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
bool aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
bool bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x || y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_BOOL || GxB_NO_LOR_BOOL)
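//------------------------------------------------------------------------------
// note: GB_DISABLE lets a build opt out of this specialized kernel.  If any
// of the three GxB_NO_* symbols is defined (typically via GB_control.h),
// every function below compiles to "return (GrB_NO_VALUE)" and the caller
// falls back to the generic worker.  A minimal sketch of such an opt-out (a
// hypothetical edit to GB_control.h, not part of this file):
//
//      #define GxB_NO_LOR_BOOL 1   // drop only the hard-coded LOR_BOOL kernel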
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lor_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lor_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
bool alpha_scalar ;
bool beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((bool *) alpha_scalar_in)) ;
beta_scalar = (*((bool *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__lor_bool)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lor_bool)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lor_bool)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
bool bij = GBX (Bx, p, false) ;
Cx [p] = (x || bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lor_bool)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
bool aij = GBX (Ax, p, false) ;
Cx [p] = (aij || y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x || aij) ; \
}
GrB_Info GB (_bind1st_tran__lor_bool)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
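// restore GB_ATYPE (a no-op here, since both x and A are bool; the
// generated variants whose x and A types differ rely on this restore)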
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij || y) ; \
}
GrB_Info GB (_bind2nd_tran__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_critical_section.c | //------------------------------------------------------------------------------
// Source/Template/GB_critical_section: execute code in a critical section
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2018, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// All access to the global matrix queue, via GB_queue_* operations, must
// be done through a critical section. No other part of SuiteSparse:GraphBLAS
// uses this critical section; it is only used for accessing the global matrix
// queue via GB_queue_*. All GB_queue_* operations use the GB_CRITICAL macro
// to check the result, and if the critical section fails (ok == false),
// they return GrB_PANIC.
// Critical sections for Windows threads and ANSI C11 threads are listed below
// as drafts, but these threading models are not yet supported.
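// A usage sketch (an illustration, not code from a specific caller): the
// includer declares ok, defines the protected code as GB_CRITICAL_SECTION,
// and then includes this template:
//
//      bool ok = true ;
//      #define GB_CRITICAL_SECTION                     \
//      {                                               \
//          ... manipulate the global matrix queue ...  \
//      }
//      #include "GB_critical_section.c"
//
// If the lock cannot be acquired or released, ok becomes false and the
// caller's GB_CRITICAL macro returns GrB_PANIC.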
{
//--------------------------------------------------------------------------
// POSIX pthreads
//--------------------------------------------------------------------------
#if defined (USER_POSIX_THREADS)
{
ok = (pthread_mutex_lock (&(GB_Global.sync)) == 0) ;
{
GB_CRITICAL_SECTION ;
}
ok = ok && (pthread_mutex_unlock (&(GB_Global.sync)) == 0) ;
}
//--------------------------------------------------------------------------
// Microsoft Windows
//--------------------------------------------------------------------------
#elif defined (USER_WINDOWS_THREADS)
{
// This is not yet supported.
EnterCriticalSection (&(GB_Global.sync)) ;
{
GB_CRITICAL_SECTION ;
}
LeaveCriticalSection (&(GB_Global.sync)) ;
}
//--------------------------------------------------------------------------
// ANSI C11 threads
//--------------------------------------------------------------------------
#elif defined (USER_ANSI_THREADS)
{
// This should work per the ANSI C11 Spec, but is not yet supported.
ok = (mtx_lock (&(GB_Global.sync)) == thrd_success) ;
{
GB_CRITICAL_SECTION ;
}
ok = ok && (mtx_unlock (&(GB_Global.sync)) == thrd_success) ;
}
//--------------------------------------------------------------------------
// OpenMP
//--------------------------------------------------------------------------
#else // USER_OPENMP_THREADS or USER_NO_THREADS
{
// default: use a named OpenMP critical section. If OpenMP is not
// available, then the #pragma is ignored and this becomes vanilla,
// single-threaded code.
#pragma omp critical (GB_critical_section)
{
GB_CRITICAL_SECTION ;
}
}
#endif
}
#undef GB_CRITICAL_SECTION
|
omp-low.c | /* Lowering pass for OMP directives. Converts OMP directives into explicit
calls to the runtime library (libgomp), data marshalling to implement data
sharing and copying clauses, offloading to accelerators, and more.
Contributed by Diego Novillo <dnovillo@redhat.com>
Copyright (C) 2005-2020 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "splay-tree.h"
#include "omp-general.h"
#include "omp-low.h"
#include "omp-grid.h"
#include "gimple-low.h"
#include "alloc-pool.h"
#include "symbol-summary.h"
#include "tree-nested.h"
#include "context.h"
#include "gomp-constants.h"
#include "gimple-pretty-print.h"
#include "hsa-common.h"
#include "stringpool.h"
#include "attribs.h"
/* Lowering of OMP parallel and workshare constructs proceeds in two
phases. The first phase scans the function looking for OMP statements
and then for variables that must be replaced to satisfy data sharing
clauses. The second phase expands code for the constructs, as well as
re-gimplifying things when variables have been replaced with complex
expressions.
Final code generation is done by pass_expand_omp. The flowgraph is
scanned for regions which are then moved to a new
function, to be invoked by the thread library, or offloaded. */
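/* As a rough illustration (a sketch, not verbatim compiler output),
   a construct such as

       #pragma omp parallel shared (x)
       x++;

   is lowered so that the encountering thread fills a sender struct
   (.omp_data_o) with x or its address, while the split-off child
   function (named with an "_omp_fn" suffix; see
   create_omp_child_function below) accesses x again through its
   .omp_data_i parameter. */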
/* Context structure. Used to store information about each parallel
directive in the code. */
struct omp_context
{
/* This field must be at the beginning, as we do "inheritance": Some
callback functions for tree-inline.c (e.g., omp_copy_decl)
receive a copy_body_data pointer that is up-casted to an
omp_context pointer. */
copy_body_data cb;
/* The tree of contexts corresponding to the encountered constructs. */
struct omp_context *outer;
gimple *stmt;
/* Map variables to fields in a structure that allows communication
between sending and receiving threads. */
splay_tree field_map;
tree record_type;
tree sender_decl;
tree receiver_decl;
/* These are used just by task contexts, if task firstprivate fn is
needed. srecord_type is used to communicate from the thread
that encountered the task construct to task firstprivate fn,
record_type is allocated by GOMP_task, initialized by task firstprivate
fn and passed to the task body fn. */
splay_tree sfield_map;
tree srecord_type;
/* A chain of variables to add to the top-level block surrounding the
construct. In the case of a parallel, this is in the child function. */
tree block_vars;
/* Label to which GOMP_cancel{,lation_point} and explicit and implicit
barriers should jump during the omplower pass. */
tree cancel_label;
/* The sibling GIMPLE_OMP_FOR simd with _simt_ clause or NULL
otherwise. */
gimple *simt_stmt;
/* For task reductions registered in this context, a vector containing
the length of the private copies block (if constant, otherwise NULL)
and then offsets (if constant, otherwise NULL) for each entry. */
vec<tree> task_reductions;
/* A hash map from the reduction clauses to the registered array
elts. */
hash_map<tree, unsigned> *task_reduction_map;
/* And a hash map from the lastprivate(conditional:) variables to their
corresponding tracking loop iteration variables. */
hash_map<tree, tree> *lastprivate_conditional_map;
/* A tree_list of the reduction clauses in this context. This is
only used for checking the consistency of OpenACC reduction
clauses in scan_omp_for and is not guaranteed to contain a valid
value outside of this function. */
tree local_reduction_clauses;
/* A tree_list of the reduction clauses in outer contexts. This is
only used for checking the consistency of OpenACC reduction
clauses in scan_omp_for and is not guaranteed to contain a valid
value outside of this function. */
tree outer_reduction_clauses;
/* Nesting depth of this context. Used to beautify error messages re
invalid gotos. The outermost ctx is depth 1, with depth 0 being
reserved for the main body of the function. */
int depth;
/* True if this parallel directive is nested within another. */
bool is_nested;
/* True if this construct can be cancelled. */
bool cancellable;
/* True if lower_omp_1 should look up lastprivate conditional in parent
context. */
bool combined_into_simd_safelen1;
/* True if there is nested scan context with inclusive clause. */
bool scan_inclusive;
/* True if there is nested scan context with exclusive clause. */
bool scan_exclusive;
/* True in the second simd loop of for simd with inscan reductions. */
bool for_simd_scan_phase;
/* True if there is order(concurrent) clause on the construct. */
bool order_concurrent;
/* True if there is bind clause on the construct (i.e. a loop construct). */
bool loop_p;
};
static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static bitmap task_shared_vars;
static bitmap global_nonaddressable_vars;
static vec<omp_context *> taskreg_contexts;
static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);
#define WALK_SUBSTMTS \
case GIMPLE_BIND: \
case GIMPLE_TRY: \
case GIMPLE_CATCH: \
case GIMPLE_EH_FILTER: \
case GIMPLE_TRANSACTION: \
/* The sub-statements for these should be walked. */ \
*handled_ops_p = false; \
break;
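/* WALK_SUBSTMTS is meant to be expanded inside the switch of a
   walk_gimple_seq callback, as in omp_find_combined_for below:

       switch (gimple_code (stmt))
         {
         WALK_SUBSTMTS;
         case GIMPLE_OMP_FOR:
           ...
         }
*/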
/* Return true if CTX corresponds to an OpenACC 'parallel' or 'serial'
region. */
static bool
is_oacc_parallel_or_serial (omp_context *ctx)
{
enum gimple_code outer_type = gimple_code (ctx->stmt);
return ((outer_type == GIMPLE_OMP_TARGET)
&& ((gimple_omp_target_kind (ctx->stmt)
== GF_OMP_TARGET_KIND_OACC_PARALLEL)
|| (gimple_omp_target_kind (ctx->stmt)
== GF_OMP_TARGET_KIND_OACC_SERIAL)));
}
/* Return true if CTX corresponds to an oacc kernels region. */
static bool
is_oacc_kernels (omp_context *ctx)
{
enum gimple_code outer_type = gimple_code (ctx->stmt);
return ((outer_type == GIMPLE_OMP_TARGET)
&& (gimple_omp_target_kind (ctx->stmt)
== GF_OMP_TARGET_KIND_OACC_KERNELS));
}
/* If DECL is the artificial dummy VAR_DECL created for non-static
data member privatization, return the underlying "this" parameter,
otherwise return NULL. */
tree
omp_member_access_dummy_var (tree decl)
{
if (!VAR_P (decl)
|| !DECL_ARTIFICIAL (decl)
|| !DECL_IGNORED_P (decl)
|| !DECL_HAS_VALUE_EXPR_P (decl)
|| !lang_hooks.decls.omp_disregard_value_expr (decl, false))
return NULL_TREE;
tree v = DECL_VALUE_EXPR (decl);
if (TREE_CODE (v) != COMPONENT_REF)
return NULL_TREE;
while (1)
switch (TREE_CODE (v))
{
case COMPONENT_REF:
case MEM_REF:
case INDIRECT_REF:
CASE_CONVERT:
case POINTER_PLUS_EXPR:
v = TREE_OPERAND (v, 0);
continue;
case PARM_DECL:
if (DECL_CONTEXT (v) == current_function_decl
&& DECL_ARTIFICIAL (v)
&& TREE_CODE (TREE_TYPE (v)) == POINTER_TYPE)
return v;
return NULL_TREE;
default:
return NULL_TREE;
}
}
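/* For example, when a C++ non-static data member D is referenced in an
   OpenMP clause inside a member function, the front end creates an
   artificial VAR_DECL whose DECL_VALUE_EXPR is roughly (*this).D; the
   loop above peels the COMPONENT_REF and the indirections to recover
   the "this" PARM_DECL. */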
/* Helper for unshare_and_remap, called through walk_tree. */
static tree
unshare_and_remap_1 (tree *tp, int *walk_subtrees, void *data)
{
tree *pair = (tree *) data;
if (*tp == pair[0])
{
*tp = unshare_expr (pair[1]);
*walk_subtrees = 0;
}
else if (IS_TYPE_OR_DECL_P (*tp))
*walk_subtrees = 0;
return NULL_TREE;
}
/* Return unshare_expr (X) with all occurrences of FROM
replaced with TO. */
static tree
unshare_and_remap (tree x, tree from, tree to)
{
tree pair[2] = { from, to };
x = unshare_expr (x);
walk_tree (&x, unshare_and_remap_1, pair, NULL);
return x;
}
/* Convenience function for calling scan_omp_1_op on tree operands. */
static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
wi.info = ctx;
wi.want_locations = true;
return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Return true if CTX is for an omp parallel. */
static inline bool
is_parallel_ctx (omp_context *ctx)
{
return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}
/* Return true if CTX is for an omp task. */
static inline bool
is_task_ctx (omp_context *ctx)
{
return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}
/* Return true if CTX is for an omp taskloop. */
static inline bool
is_taskloop_ctx (omp_context *ctx)
{
return gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_TASKLOOP;
}
/* Return true if CTX is for a host omp teams. */
static inline bool
is_host_teams_ctx (omp_context *ctx)
{
return gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS
&& gimple_omp_teams_host (as_a <gomp_teams *> (ctx->stmt));
}
/* Return true if CTX is for an omp parallel or omp task or host omp teams
(the last one is strictly not a task region in OpenMP speak, but we
need to treat it similarly). */
static inline bool
is_taskreg_ctx (omp_context *ctx)
{
return is_parallel_ctx (ctx) || is_task_ctx (ctx) || is_host_teams_ctx (ctx);
}
/* Return true if EXPR is variable sized. */
static inline bool
is_variable_sized (const_tree expr)
{
return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}
/* Look up variables. The "maybe" form allows the variable not to have
been entered; the plain form asserts that it has been. */
static inline tree
lookup_decl (tree var, omp_context *ctx)
{
tree *n = ctx->cb.decl_map->get (var);
return *n;
}
static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
tree *n = ctx->cb.decl_map->get (const_cast<tree> (var));
return n ? *n : NULL_TREE;
}
static inline tree
lookup_field (tree var, omp_context *ctx)
{
splay_tree_node n;
n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
return (tree) n->value;
}
static inline tree
lookup_sfield (splay_tree_key key, omp_context *ctx)
{
splay_tree_node n;
n = splay_tree_lookup (ctx->sfield_map
? ctx->sfield_map : ctx->field_map, key);
return (tree) n->value;
}
static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
return lookup_sfield ((splay_tree_key) var, ctx);
}
static inline tree
maybe_lookup_field (splay_tree_key key, omp_context *ctx)
{
splay_tree_node n;
n = splay_tree_lookup (ctx->field_map, key);
return n ? (tree) n->value : NULL_TREE;
}
static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
return maybe_lookup_field ((splay_tree_key) var, ctx);
}
/* Return true if DECL should be copied by pointer. SHARED_CTX is
the parallel context if DECL is to be shared. */
static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
if (AGGREGATE_TYPE_P (TREE_TYPE (decl))
|| TYPE_ATOMIC (TREE_TYPE (decl)))
return true;
/* We can only use copy-in/copy-out semantics for shared variables
when we know the value is not accessible from an outer scope. */
if (shared_ctx)
{
gcc_assert (!is_gimple_omp_oacc (shared_ctx->stmt));
/* ??? Trivially accessible from anywhere. But why would we even
be passing an address in this case? Should we simply assert
this to be false, or should we have a cleanup pass that removes
these from the list of mappings? */
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, shared_ctx)))
return true;
/* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
without analyzing the expression whether or not its location
is accessible to anyone else. In the case of nested parallel
regions it certainly may be. */
if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
return true;
/* Do not use copy-in/copy-out for variables that have their
address taken. */
if (is_global_var (decl))
{
/* For file scope vars, track whether we've seen them as
non-addressable initially and in that case, keep the same
answer for the duration of the pass, even when they are made
addressable later on e.g. through reduction expansion. Global
variables which weren't addressable before the pass will not
have their privatized copies address taken. See PR91216. */
if (!TREE_ADDRESSABLE (decl))
{
if (!global_nonaddressable_vars)
global_nonaddressable_vars = BITMAP_ALLOC (NULL);
bitmap_set_bit (global_nonaddressable_vars, DECL_UID (decl));
}
else if (!global_nonaddressable_vars
|| !bitmap_bit_p (global_nonaddressable_vars,
DECL_UID (decl)))
return true;
}
else if (TREE_ADDRESSABLE (decl))
return true;
/* lower_send_shared_vars only uses copy-in, but not copy-out
for these. */
if (TREE_READONLY (decl)
|| ((TREE_CODE (decl) == RESULT_DECL
|| TREE_CODE (decl) == PARM_DECL)
&& DECL_BY_REFERENCE (decl)))
return false;
/* Disallow copy-in/out in nested parallel if
decl is shared in outer parallel, otherwise
each thread could store the shared variable
in its own copy-in location, making the
variable no longer really shared. */
if (shared_ctx->is_nested)
{
omp_context *up;
for (up = shared_ctx->outer; up; up = up->outer)
if ((is_taskreg_ctx (up)
|| (gimple_code (up->stmt) == GIMPLE_OMP_TARGET
&& is_gimple_omp_offloaded (up->stmt)))
&& maybe_lookup_decl (decl, up))
break;
if (up)
{
tree c;
if (gimple_code (up->stmt) == GIMPLE_OMP_TARGET)
{
for (c = gimple_omp_target_clauses (up->stmt);
c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_DECL (c) == decl)
break;
}
else
for (c = gimple_omp_taskreg_clauses (up->stmt);
c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
&& OMP_CLAUSE_DECL (c) == decl)
break;
if (c)
goto maybe_mark_addressable_and_ret;
}
}
/* For tasks avoid using copy-in/out. As tasks can be
deferred or executed in different thread, when GOMP_task
returns, the task hasn't necessarily terminated. */
if (is_task_ctx (shared_ctx))
{
tree outer;
maybe_mark_addressable_and_ret:
outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
if (is_gimple_reg (outer) && !omp_member_access_dummy_var (outer))
{
/* Taking address of OUTER in lower_send_shared_vars
might need regimplification of everything that uses the
variable. */
if (!task_shared_vars)
task_shared_vars = BITMAP_ALLOC (NULL);
bitmap_set_bit (task_shared_vars, DECL_UID (outer));
TREE_ADDRESSABLE (outer) = 1;
}
return true;
}
}
return false;
}
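/* To summarize a few of the cases above: aggregates and atomics are
   always passed by pointer; a TREE_READONLY scalar can use copy-in
   alone; and a scalar that an enclosing parallel also shares must be
   passed by pointer, since a private copy-in location in the inner
   region would break the sharing. */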
/* Construct a new automatic decl similar to VAR. */
static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
tree copy = copy_var_decl (var, name, type);
DECL_CONTEXT (copy) = current_function_decl;
DECL_CHAIN (copy) = ctx->block_vars;
/* If VAR is listed in task_shared_vars, it wasn't originally
addressable; it was made so only because a task needs to take its
address. Privatized copies of such a var need not be addressable. */
if (TREE_ADDRESSABLE (var)
&& ((task_shared_vars
&& bitmap_bit_p (task_shared_vars, DECL_UID (var)))
|| (global_nonaddressable_vars
&& bitmap_bit_p (global_nonaddressable_vars, DECL_UID (var)))))
TREE_ADDRESSABLE (copy) = 0;
ctx->block_vars = copy;
return copy;
}
static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}
/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
as appropriate. */
static tree
omp_build_component_ref (tree obj, tree field)
{
tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
if (TREE_THIS_VOLATILE (field))
TREE_THIS_VOLATILE (ret) |= 1;
if (TREE_READONLY (field))
TREE_READONLY (ret) |= 1;
return ret;
}
/* Build tree nodes to access the field for VAR on the receiver side. */
static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
tree x, field = lookup_field (var, ctx);
/* If the receiver record type was remapped in the child function,
remap the field into the new record type. */
x = maybe_lookup_field (field, ctx);
if (x != NULL)
field = x;
x = build_simple_mem_ref (ctx->receiver_decl);
TREE_THIS_NOTRAP (x) = 1;
x = omp_build_component_ref (x, field);
if (by_ref)
{
x = build_simple_mem_ref (x);
TREE_THIS_NOTRAP (x) = 1;
}
return x;
}
/* Build tree nodes to access VAR in the scope outer to CTX. In the case
of a parallel, this is a component reference; for workshare constructs
this is some variable. */
static tree
build_outer_var_ref (tree var, omp_context *ctx,
enum omp_clause_code code = OMP_CLAUSE_ERROR)
{
tree x;
omp_context *outer = ctx->outer;
while (outer && gimple_code (outer->stmt) == GIMPLE_OMP_TASKGROUP)
outer = outer->outer;
if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
x = var;
else if (is_variable_sized (var))
{
x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
x = build_outer_var_ref (x, ctx, code);
x = build_simple_mem_ref (x);
}
else if (is_taskreg_ctx (ctx))
{
bool by_ref = use_pointer_for_field (var, NULL);
x = build_receiver_ref (var, by_ref, ctx);
}
else if ((gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
|| ctx->loop_p
|| (code == OMP_CLAUSE_PRIVATE
&& (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
|| gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS
|| gimple_code (ctx->stmt) == GIMPLE_OMP_SINGLE)))
{
/* #pragma omp simd isn't a worksharing construct, and can reference
even private vars in its linear etc. clauses.
Similarly, OMP_CLAUSE_PRIVATE with an outer reference can refer
to private vars in all worksharing constructs. */
x = NULL_TREE;
if (outer && is_taskreg_ctx (outer))
x = lookup_decl (var, outer);
else if (outer)
x = maybe_lookup_decl_in_outer_ctx (var, ctx);
if (x == NULL_TREE)
x = var;
}
else if (code == OMP_CLAUSE_LASTPRIVATE && is_taskloop_ctx (ctx))
{
gcc_assert (outer);
splay_tree_node n
= splay_tree_lookup (outer->field_map,
(splay_tree_key) &DECL_UID (var));
if (n == NULL)
{
if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, outer)))
x = var;
else
x = lookup_decl (var, outer);
}
else
{
tree field = (tree) n->value;
/* If the receiver record type was remapped in the child function,
remap the field into the new record type. */
x = maybe_lookup_field (field, outer);
if (x != NULL)
field = x;
x = build_simple_mem_ref (outer->receiver_decl);
x = omp_build_component_ref (x, field);
if (use_pointer_for_field (var, outer))
x = build_simple_mem_ref (x);
}
}
else if (outer)
{
if (gimple_code (outer->stmt) == GIMPLE_OMP_GRID_BODY)
{
outer = outer->outer;
gcc_assert (outer
&& gimple_code (outer->stmt) != GIMPLE_OMP_GRID_BODY);
}
x = lookup_decl (var, outer);
}
else if (omp_is_reference (var))
/* This can happen with orphaned constructs. If VAR is a reference, it
may be shared, in which case it is valid here. */
x = var;
else if (omp_member_access_dummy_var (var))
x = var;
else
gcc_unreachable ();
if (x == var)
{
tree t = omp_member_access_dummy_var (var);
if (t)
{
x = DECL_VALUE_EXPR (var);
tree o = maybe_lookup_decl_in_outer_ctx (t, ctx);
if (o != t)
x = unshare_and_remap (x, t, o);
else
x = unshare_expr (x);
}
}
if (omp_is_reference (var))
x = build_simple_mem_ref (x);
return x;
}
/* Build tree nodes to access the field for VAR on the sender side. */
static tree
build_sender_ref (splay_tree_key key, omp_context *ctx)
{
tree field = lookup_sfield (key, ctx);
return omp_build_component_ref (ctx->sender_decl, field);
}
static tree
build_sender_ref (tree var, omp_context *ctx)
{
return build_sender_ref ((splay_tree_key) var, ctx);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL. The
MASK bits select the behavior: bit 1 installs the field in the
receiver record (field_map), bit 2 in the sender record (sfield_map),
bit 4 wraps an array type in a double pointer, and bits 8 and 16 key
the maps by &DECL_UID (VAR) or &DECL_NAME (VAR) instead of VAR itself
(bit 16 also switches to the Fortran array-descriptor data type). */
static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
tree field, type, sfield = NULL_TREE;
splay_tree_key key = (splay_tree_key) var;
if ((mask & 16) != 0)
{
key = (splay_tree_key) &DECL_NAME (var);
gcc_checking_assert (key != (splay_tree_key) var);
}
if ((mask & 8) != 0)
{
key = (splay_tree_key) &DECL_UID (var);
gcc_checking_assert (key != (splay_tree_key) var);
}
gcc_assert ((mask & 1) == 0
|| !splay_tree_lookup (ctx->field_map, key));
gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
|| !splay_tree_lookup (ctx->sfield_map, key));
gcc_assert ((mask & 3) == 3
|| !is_gimple_omp_oacc (ctx->stmt));
type = TREE_TYPE (var);
if ((mask & 16) != 0)
type = lang_hooks.decls.omp_array_data (var, true);
/* Prevent redeclaring the var in the split-off function with a restrict
pointer type. Note that we only clear type itself, restrict qualifiers in
the pointed-to type will be ignored by points-to analysis. */
if (POINTER_TYPE_P (type)
&& TYPE_RESTRICT (type))
type = build_qualified_type (type, TYPE_QUALS (type) & ~TYPE_QUAL_RESTRICT);
if (mask & 4)
{
gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
type = build_pointer_type (build_pointer_type (type));
}
else if (by_ref)
type = build_pointer_type (type);
else if ((mask & 3) == 1 && omp_is_reference (var))
type = TREE_TYPE (type);
field = build_decl (DECL_SOURCE_LOCATION (var),
FIELD_DECL, DECL_NAME (var), type);
/* Remember what variable this field was created for. This does have a
side effect of making dwarf2out ignore this member, so for helpful
debugging we clear it later in delete_omp_context. */
DECL_ABSTRACT_ORIGIN (field) = var;
if ((mask & 16) == 0 && type == TREE_TYPE (var))
{
SET_DECL_ALIGN (field, DECL_ALIGN (var));
DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
}
else
SET_DECL_ALIGN (field, TYPE_ALIGN (type));
if ((mask & 3) == 3)
{
insert_field_into_struct (ctx->record_type, field);
if (ctx->srecord_type)
{
sfield = build_decl (DECL_SOURCE_LOCATION (var),
FIELD_DECL, DECL_NAME (var), type);
DECL_ABSTRACT_ORIGIN (sfield) = var;
SET_DECL_ALIGN (sfield, DECL_ALIGN (field));
DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
insert_field_into_struct (ctx->srecord_type, sfield);
}
}
else
{
if (ctx->srecord_type == NULL_TREE)
{
tree t;
ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
{
sfield = build_decl (DECL_SOURCE_LOCATION (t),
FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
insert_field_into_struct (ctx->srecord_type, sfield);
splay_tree_insert (ctx->sfield_map,
(splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
(splay_tree_value) sfield);
}
}
sfield = field;
insert_field_into_struct ((mask & 1) ? ctx->record_type
: ctx->srecord_type, field);
}
if (mask & 1)
splay_tree_insert (ctx->field_map, key, (splay_tree_value) field);
if ((mask & 2) && ctx->sfield_map)
splay_tree_insert (ctx->sfield_map, key, (splay_tree_value) sfield);
}
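/* Create a context-local copy of VAR and record the VAR -> copy
   mapping in CTX's decl map. */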
static tree
install_var_local (tree var, omp_context *ctx)
{
tree new_var = omp_copy_decl_1 (var, ctx);
insert_decl_map (&ctx->cb, var, new_var);
return new_var;
}
/* Adjust the replacement for DECL in CTX for the new context. This means
copying the DECL_VALUE_EXPR, and fixing up the type. */
static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
tree new_decl, size;
new_decl = lookup_decl (decl, ctx);
TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
&& DECL_HAS_VALUE_EXPR_P (decl))
{
tree ve = DECL_VALUE_EXPR (decl);
walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
SET_DECL_VALUE_EXPR (new_decl, ve);
DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
}
if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
{
size = remap_decl (DECL_SIZE (decl), &ctx->cb);
if (size == error_mark_node)
size = TYPE_SIZE (TREE_TYPE (new_decl));
DECL_SIZE (new_decl) = size;
size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
if (size == error_mark_node)
size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
DECL_SIZE_UNIT (new_decl) = size;
}
}
/* The callback for remap_decl. Search all containing contexts for a
mapping of the variable; this avoids having to duplicate the splay
tree ahead of time. We know a mapping doesn't already exist in the
given context. Create new mappings to implement default semantics. */
static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
omp_context *ctx = (omp_context *) cb;
tree new_var;
if (TREE_CODE (var) == LABEL_DECL)
{
if (FORCED_LABEL (var) || DECL_NONLOCAL (var))
return var;
new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
DECL_CONTEXT (new_var) = current_function_decl;
insert_decl_map (&ctx->cb, var, new_var);
return new_var;
}
while (!is_taskreg_ctx (ctx))
{
ctx = ctx->outer;
if (ctx == NULL)
return var;
new_var = maybe_lookup_decl (var, ctx);
if (new_var)
return new_var;
}
if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
return var;
return error_mark_node;
}
/* Create a new context, with OUTER_CTX being the surrounding context. */
static omp_context *
new_omp_context (gimple *stmt, omp_context *outer_ctx)
{
omp_context *ctx = XCNEW (omp_context);
splay_tree_insert (all_contexts, (splay_tree_key) stmt,
(splay_tree_value) ctx);
ctx->stmt = stmt;
if (outer_ctx)
{
ctx->outer = outer_ctx;
ctx->cb = outer_ctx->cb;
ctx->cb.block = NULL;
ctx->depth = outer_ctx->depth + 1;
}
else
{
ctx->cb.src_fn = current_function_decl;
ctx->cb.dst_fn = current_function_decl;
ctx->cb.src_node = cgraph_node::get (current_function_decl);
gcc_checking_assert (ctx->cb.src_node);
ctx->cb.dst_node = ctx->cb.src_node;
ctx->cb.src_cfun = cfun;
ctx->cb.copy_decl = omp_copy_decl;
ctx->cb.eh_lp_nr = 0;
ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
ctx->cb.adjust_array_error_bounds = true;
ctx->cb.dont_remap_vla_if_no_change = true;
ctx->depth = 1;
}
ctx->cb.decl_map = new hash_map<tree, tree>;
return ctx;
}
static gimple_seq maybe_catch_exception (gimple_seq);
/* Finalize task copyfn. */
static void
finalize_task_copyfn (gomp_task *task_stmt)
{
struct function *child_cfun;
tree child_fn;
gimple_seq seq = NULL, new_seq;
gbind *bind;
child_fn = gimple_omp_task_copy_fn (task_stmt);
if (child_fn == NULL_TREE)
return;
child_cfun = DECL_STRUCT_FUNCTION (child_fn);
DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
push_cfun (child_cfun);
bind = gimplify_body (child_fn, false);
gimple_seq_add_stmt (&seq, bind);
new_seq = maybe_catch_exception (seq);
if (new_seq != seq)
{
bind = gimple_build_bind (NULL, new_seq, NULL);
seq = NULL;
gimple_seq_add_stmt (&seq, bind);
}
gimple_set_body (child_fn, seq);
pop_cfun ();
/* Inform the callgraph about the new function. */
cgraph_node *node = cgraph_node::get_create (child_fn);
node->parallelized_function = 1;
cgraph_node::add_new_function (child_fn, false);
}
/* Destroy an omp_context data structure. Called through the splay tree
value delete callback. */
static void
delete_omp_context (splay_tree_value value)
{
omp_context *ctx = (omp_context *) value;
delete ctx->cb.decl_map;
if (ctx->field_map)
splay_tree_delete (ctx->field_map);
if (ctx->sfield_map)
splay_tree_delete (ctx->sfield_map);
/* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it, as
otherwise it produces corrupt debug information. */
if (ctx->record_type)
{
tree t;
for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
DECL_ABSTRACT_ORIGIN (t) = NULL;
}
if (ctx->srecord_type)
{
tree t;
for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
DECL_ABSTRACT_ORIGIN (t) = NULL;
}
if (is_task_ctx (ctx))
finalize_task_copyfn (as_a <gomp_task *> (ctx->stmt));
if (ctx->task_reduction_map)
{
ctx->task_reductions.release ();
delete ctx->task_reduction_map;
}
delete ctx->lastprivate_conditional_map;
XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
context. */
static void
fixup_child_record_type (omp_context *ctx)
{
tree f, type = ctx->record_type;
if (!ctx->receiver_decl)
return;
/* ??? It isn't sufficient to just call remap_type here, because
variably_modified_type_p doesn't work the way we expect for
record types. Testing each field for whether it needs remapping
and creating a new record by hand works, however. */
for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
break;
if (f)
{
tree name, new_fields = NULL;
type = lang_hooks.types.make_type (RECORD_TYPE);
name = DECL_NAME (TYPE_NAME (ctx->record_type));
name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
TYPE_DECL, name, type);
TYPE_NAME (type) = name;
for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
{
tree new_f = copy_node (f);
DECL_CONTEXT (new_f) = type;
TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
DECL_CHAIN (new_f) = new_fields;
walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
&ctx->cb, NULL);
walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
&ctx->cb, NULL);
new_fields = new_f;
/* Arrange to be able to look up the receiver field
given the sender field. */
splay_tree_insert (ctx->field_map, (splay_tree_key) f,
(splay_tree_value) new_f);
}
TYPE_FIELDS (type) = nreverse (new_fields);
layout_type (type);
}
/* In a target region we never modify any of the pointers in *.omp_data_i,
so attempt to help the optimizers. */
if (is_gimple_omp_offloaded (ctx->stmt))
type = build_qualified_type (type, TYPE_QUAL_CONST);
TREE_TYPE (ctx->receiver_decl)
= build_qualified_type (build_reference_type (type), TYPE_QUAL_RESTRICT);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
specified by CLAUSES. */
static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
tree c, decl;
bool scan_array_reductions = false;
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
{
bool by_ref;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_PRIVATE:
decl = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
goto do_private;
else if (!is_variable_sized (decl))
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_SHARED:
decl = OMP_CLAUSE_DECL (c);
/* Ignore shared directives in teams construct inside of
target construct. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS
&& !is_host_teams_ctx (ctx))
{
/* Global variables don't need to be copied,
the receiver side will use them directly. */
tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
if (is_global_var (odecl))
break;
insert_decl_map (&ctx->cb, decl, odecl);
break;
}
gcc_assert (is_taskreg_ctx (ctx));
gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
|| !is_variable_sized (decl));
/* Global variables don't need to be copied,
the receiver side will use them directly. */
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
break;
if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
{
use_pointer_for_field (decl, ctx);
break;
}
by_ref = use_pointer_for_field (decl, NULL);
if ((! TREE_READONLY (decl) && !OMP_CLAUSE_SHARED_READONLY (c))
|| TREE_ADDRESSABLE (decl)
|| by_ref
|| omp_is_reference (decl))
{
by_ref = use_pointer_for_field (decl, ctx);
install_var_field (decl, by_ref, 3, ctx);
install_var_local (decl, ctx);
break;
}
/* We don't need to copy const scalar vars back. */
OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
goto do_private;
case OMP_CLAUSE_REDUCTION:
/* Collect 'reduction' clauses on OpenACC compute construct. */
if (is_gimple_omp_oacc (ctx->stmt)
&& is_gimple_omp_offloaded (ctx->stmt))
{
/* No 'reduction' clauses on OpenACC 'kernels'. */
gcc_checking_assert (!is_oacc_kernels (ctx));
ctx->local_reduction_clauses
= tree_cons (NULL, c, ctx->local_reduction_clauses);
}
/* FALLTHRU */
case OMP_CLAUSE_IN_REDUCTION:
decl = OMP_CLAUSE_DECL (c);
if (TREE_CODE (decl) == MEM_REF)
{
tree t = TREE_OPERAND (decl, 0);
if (TREE_CODE (t) == POINTER_PLUS_EXPR)
t = TREE_OPERAND (t, 0);
if (TREE_CODE (t) == INDIRECT_REF
|| TREE_CODE (t) == ADDR_EXPR)
t = TREE_OPERAND (t, 0);
install_var_local (t, ctx);
if (is_taskreg_ctx (ctx)
&& (!is_global_var (maybe_lookup_decl_in_outer_ctx (t, ctx))
|| (is_task_ctx (ctx)
&& (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE
|| (TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE
&& (TREE_CODE (TREE_TYPE (TREE_TYPE (t)))
== POINTER_TYPE)))))
&& !is_variable_sized (t)
&& (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
|| (!OMP_CLAUSE_REDUCTION_TASK (c)
&& !is_task_ctx (ctx))))
{
by_ref = use_pointer_for_field (t, NULL);
if (is_task_ctx (ctx)
&& TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE
&& TREE_CODE (TREE_TYPE (TREE_TYPE (t))) == POINTER_TYPE)
{
install_var_field (t, false, 1, ctx);
install_var_field (t, by_ref, 2, ctx);
}
else
install_var_field (t, by_ref, 3, ctx);
}
break;
}
if (is_task_ctx (ctx)
|| (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_TASK (c)
&& is_parallel_ctx (ctx)))
{
/* Global variables don't need to be copied,
the receiver side will use them directly. */
if (!is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
{
by_ref = use_pointer_for_field (decl, ctx);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION)
install_var_field (decl, by_ref, 3, ctx);
}
install_var_local (decl, ctx);
break;
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_TASK (c))
{
install_var_local (decl, ctx);
break;
}
goto do_private;
case OMP_CLAUSE_LASTPRIVATE:
/* Let the corresponding firstprivate clause create
the variable. */
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
break;
/* FALLTHRU */
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_LINEAR:
decl = OMP_CLAUSE_DECL (c);
do_private:
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IS_DEVICE_PTR)
&& is_gimple_omp_offloaded (ctx->stmt))
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
install_var_field (decl, !omp_is_reference (decl), 3, ctx);
else if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
install_var_field (decl, true, 3, ctx);
else
install_var_field (decl, false, 3, ctx);
}
if (is_variable_sized (decl))
{
if (is_task_ctx (ctx))
install_var_field (decl, false, 1, ctx);
break;
}
else if (is_taskreg_ctx (ctx))
{
bool global
= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
by_ref = use_pointer_for_field (decl, NULL);
if (is_task_ctx (ctx)
&& (global || by_ref || omp_is_reference (decl)))
{
install_var_field (decl, false, 1, ctx);
if (!global)
install_var_field (decl, by_ref, 2, ctx);
}
else if (!global)
install_var_field (decl, by_ref, 3, ctx);
}
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_ADDR:
decl = OMP_CLAUSE_DECL (c);
/* Fortran array descriptors. */
if (lang_hooks.decls.omp_array_data (decl, true))
install_var_field (decl, false, 19, ctx);
else if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_ADDR
&& !omp_is_reference (decl)
&& !omp_is_allocatable_or_ptr (decl))
|| TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
install_var_field (decl, true, 11, ctx);
else
install_var_field (decl, false, 11, ctx);
if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
install_var_local (decl2, ctx);
}
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_IS_DEVICE_PTR:
decl = OMP_CLAUSE_DECL (c);
goto do_private;
case OMP_CLAUSE__LOOPTEMP_:
case OMP_CLAUSE__REDUCTEMP_:
gcc_assert (is_taskreg_ctx (ctx));
decl = OMP_CLAUSE_DECL (c);
install_var_field (decl, false, 3, ctx);
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_COPYIN:
decl = OMP_CLAUSE_DECL (c);
by_ref = use_pointer_for_field (decl, NULL);
install_var_field (decl, by_ref, 3, ctx);
break;
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_THREAD_LIMIT:
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_DEPEND:
case OMP_CLAUSE_PRIORITY:
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
if (ctx->outer)
scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
break;
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE_MAP:
if (ctx->outer)
scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
decl = OMP_CLAUSE_DECL (c);
/* Global variables with the "omp declare target" attribute don't
need to be copied; the receiver side will use them directly.
However, global variables with the "omp declare target link"
attribute do need to be copied, as do maps using the ALWAYS
modifier. */
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& DECL_P (decl)
&& ((OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER
&& (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_REFERENCE))
|| TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_TO
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_FROM
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_TOFROM
&& is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
&& varpool_node::get_create (decl)->offloadable
&& !lookup_attribute ("omp declare target link",
DECL_ATTRIBUTES (decl)))
break;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER)
{
/* Ignore GOMP_MAP_POINTER kind for arrays in regions that are
not offloaded; there is nothing to map for those. */
if (!is_gimple_omp_offloaded (ctx->stmt)
&& !POINTER_TYPE_P (TREE_TYPE (decl))
&& !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
break;
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
{
if (TREE_CODE (decl) == COMPONENT_REF
|| (TREE_CODE (decl) == INDIRECT_REF
&& TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
== REFERENCE_TYPE)))
break;
if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
install_var_local (decl2, ctx);
}
install_var_local (decl, ctx);
break;
}
if (DECL_P (decl))
{
if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
install_var_field (decl2, true, 3, ctx);
install_var_local (decl2, ctx);
install_var_local (decl, ctx);
}
else
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
&& !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
&& TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
install_var_field (decl, true, 7, ctx);
else
install_var_field (decl, true, 3, ctx);
if (is_gimple_omp_offloaded (ctx->stmt)
&& !OMP_CLAUSE_MAP_IN_REDUCTION (c))
install_var_local (decl, ctx);
}
}
else
{
tree base = get_base_address (decl);
tree nc = OMP_CLAUSE_CHAIN (c);
if (DECL_P (base)
&& nc != NULL_TREE
&& OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_DECL (nc) == base
&& OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_POINTER
&& integer_zerop (OMP_CLAUSE_SIZE (nc)))
{
OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
}
else
{
if (ctx->outer)
{
scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
decl = OMP_CLAUSE_DECL (c);
}
gcc_assert (!splay_tree_lookup (ctx->field_map,
(splay_tree_key) decl));
tree field
= build_decl (OMP_CLAUSE_LOCATION (c),
FIELD_DECL, NULL_TREE, ptr_type_node);
SET_DECL_ALIGN (field, TYPE_ALIGN (ptr_type_node));
insert_field_into_struct (ctx->record_type, field);
splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
(splay_tree_value) field);
}
}
break;
case OMP_CLAUSE__GRIDDIM_:
if (ctx->outer)
{
scan_omp_op (&OMP_CLAUSE__GRIDDIM__SIZE (c), ctx->outer);
scan_omp_op (&OMP_CLAUSE__GRIDDIM__GROUP (c), ctx->outer);
}
break;
case OMP_CLAUSE_ORDER:
ctx->order_concurrent = true;
break;
case OMP_CLAUSE_BIND:
ctx->loop_p = true;
break;
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_SAFELEN:
case OMP_CLAUSE_SIMDLEN:
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
case OMP_CLAUSE_NOGROUP:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
case OMP_CLAUSE_GANG:
case OMP_CLAUSE_WORKER:
case OMP_CLAUSE_VECTOR:
case OMP_CLAUSE_INDEPENDENT:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_SEQ:
case OMP_CLAUSE_TILE:
case OMP_CLAUSE__SIMT_:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_NONTEMPORAL:
case OMP_CLAUSE_IF_PRESENT:
case OMP_CLAUSE_FINALIZE:
case OMP_CLAUSE_TASK_REDUCTION:
break;
case OMP_CLAUSE_ALIGNED:
decl = OMP_CLAUSE_DECL (c);
if (is_global_var (decl)
&& TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
install_var_local (decl, ctx);
break;
case OMP_CLAUSE__CONDTEMP_:
decl = OMP_CLAUSE_DECL (c);
if (is_parallel_ctx (ctx))
{
install_var_field (decl, false, 3, ctx);
install_var_local (decl, ctx);
}
else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD
&& !OMP_CLAUSE__CONDTEMP__ITER (c))
install_var_local (decl, ctx);
break;
case OMP_CLAUSE__CACHE_:
default:
gcc_unreachable ();
}
}
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
{
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_LASTPRIVATE:
/* Let the corresponding firstprivate clause create
the variable. */
if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
scan_array_reductions = true;
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
break;
/* FALLTHRU */
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_LINEAR:
case OMP_CLAUSE_IS_DEVICE_PTR:
decl = OMP_CLAUSE_DECL (c);
if (is_variable_sized (decl))
{
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IS_DEVICE_PTR)
&& is_gimple_omp_offloaded (ctx->stmt))
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
install_var_local (decl2, ctx);
fixup_remapped_decl (decl2, ctx, false);
}
install_var_local (decl, ctx);
}
fixup_remapped_decl (decl, ctx,
OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
&& OMP_CLAUSE_PRIVATE_DEBUG (c));
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
scan_array_reductions = true;
break;
case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_IN_REDUCTION:
decl = OMP_CLAUSE_DECL (c);
if (TREE_CODE (decl) != MEM_REF)
{
if (is_variable_sized (decl))
install_var_local (decl, ctx);
fixup_remapped_decl (decl, ctx, false);
}
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
scan_array_reductions = true;
break;
case OMP_CLAUSE_TASK_REDUCTION:
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
scan_array_reductions = true;
break;
case OMP_CLAUSE_SHARED:
/* Ignore shared directives in teams construct inside of
target construct. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS
&& !is_host_teams_ctx (ctx))
break;
decl = OMP_CLAUSE_DECL (c);
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
break;
if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
{
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
ctx->outer)))
break;
bool by_ref = use_pointer_for_field (decl, ctx);
install_var_field (decl, by_ref, 11, ctx);
break;
}
fixup_remapped_decl (decl, ctx, false);
break;
case OMP_CLAUSE_MAP:
if (!is_gimple_omp_offloaded (ctx->stmt))
break;
decl = OMP_CLAUSE_DECL (c);
if (DECL_P (decl)
&& ((OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER
&& (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_REFERENCE))
|| TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
&& is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
&& varpool_node::get_create (decl)->offloadable)
break;
if (DECL_P (decl))
{
if ((OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER)
&& TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
&& !COMPLETE_TYPE_P (TREE_TYPE (decl)))
{
tree new_decl = lookup_decl (decl, ctx);
TREE_TYPE (new_decl)
= remap_type (TREE_TYPE (decl), &ctx->cb);
}
else if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
fixup_remapped_decl (decl2, ctx, false);
fixup_remapped_decl (decl, ctx, true);
}
else
fixup_remapped_decl (decl, ctx, false);
}
break;
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_THREAD_LIMIT:
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_SAFELEN:
case OMP_CLAUSE_SIMDLEN:
case OMP_CLAUSE_ALIGNED:
case OMP_CLAUSE_DEPEND:
case OMP_CLAUSE__LOOPTEMP_:
case OMP_CLAUSE__REDUCTEMP_:
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE_PRIORITY:
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
case OMP_CLAUSE_NOGROUP:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_ORDER:
case OMP_CLAUSE_BIND:
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_ADDR:
case OMP_CLAUSE_NONTEMPORAL:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
case OMP_CLAUSE_GANG:
case OMP_CLAUSE_WORKER:
case OMP_CLAUSE_VECTOR:
case OMP_CLAUSE_INDEPENDENT:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_SEQ:
case OMP_CLAUSE_TILE:
case OMP_CLAUSE__GRIDDIM_:
case OMP_CLAUSE__SIMT_:
case OMP_CLAUSE_IF_PRESENT:
case OMP_CLAUSE_FINALIZE:
case OMP_CLAUSE__CONDTEMP_:
break;
case OMP_CLAUSE__CACHE_:
default:
gcc_unreachable ();
}
}
gcc_checking_assert (!scan_array_reductions
|| !is_gimple_omp_oacc (ctx->stmt));
if (scan_array_reductions)
{
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
}
}
/* Create a new name for omp child function. Returns an identifier. */
static tree
create_omp_child_function_name (bool task_copy)
{
return clone_function_name_numbered (current_function_decl,
task_copy ? "_omp_cpyfn" : "_omp_fn");
}
/* Return true if CTX may belong to offloaded code: either if current function
is offloaded, or any enclosing context corresponds to a target region. */
static bool
omp_maybe_offloaded_ctx (omp_context *ctx)
{
if (cgraph_node::get (current_function_decl)->offloadable)
return true;
for (; ctx; ctx = ctx->outer)
if (is_gimple_omp_offloaded (ctx->stmt))
return true;
return false;
}
/* Build a decl for the omp child function. It will not yet contain a
body, just the bare decl. */
static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
tree decl, type, name, t;
name = create_omp_child_function_name (task_copy);
if (task_copy)
type = build_function_type_list (void_type_node, ptr_type_node,
ptr_type_node, NULL_TREE);
else
type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);
gcc_checking_assert (!is_gimple_omp_oacc (ctx->stmt)
|| !task_copy);
if (!task_copy)
ctx->cb.dst_fn = decl;
else
gimple_omp_task_set_copy_fn (ctx->stmt, decl);
TREE_STATIC (decl) = 1;
TREE_USED (decl) = 1;
DECL_ARTIFICIAL (decl) = 1;
DECL_IGNORED_P (decl) = 0;
TREE_PUBLIC (decl) = 0;
DECL_UNINLINABLE (decl) = 1;
DECL_EXTERNAL (decl) = 0;
DECL_CONTEXT (decl) = NULL_TREE;
DECL_INITIAL (decl) = make_node (BLOCK);
BLOCK_SUPERCONTEXT (DECL_INITIAL (decl)) = decl;
DECL_ATTRIBUTES (decl) = DECL_ATTRIBUTES (current_function_decl);
/* Remove omp declare simd attribute from the new attributes. */
if (tree a = lookup_attribute ("omp declare simd", DECL_ATTRIBUTES (decl)))
{
while (tree a2 = lookup_attribute ("omp declare simd", TREE_CHAIN (a)))
a = a2;
a = TREE_CHAIN (a);
for (tree *p = &DECL_ATTRIBUTES (decl); *p != a;)
if (is_attribute_p ("omp declare simd", get_attribute_name (*p)))
*p = TREE_CHAIN (*p);
else
{
tree chain = TREE_CHAIN (*p);
*p = copy_node (*p);
p = &TREE_CHAIN (*p);
*p = chain;
}
}
DECL_FUNCTION_SPECIFIC_OPTIMIZATION (decl)
= DECL_FUNCTION_SPECIFIC_OPTIMIZATION (current_function_decl);
DECL_FUNCTION_SPECIFIC_TARGET (decl)
= DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl);
DECL_FUNCTION_VERSIONED (decl)
= DECL_FUNCTION_VERSIONED (current_function_decl);
if (omp_maybe_offloaded_ctx (ctx))
{
cgraph_node::get_create (decl)->offloadable = 1;
if (ENABLE_OFFLOADING)
g->have_offload = true;
}
if (cgraph_node::get_create (decl)->offloadable
&& !lookup_attribute ("omp declare target",
DECL_ATTRIBUTES (current_function_decl)))
{
const char *target_attr = (is_gimple_omp_offloaded (ctx->stmt)
? "omp target entrypoint"
: "omp declare target");
DECL_ATTRIBUTES (decl)
= tree_cons (get_identifier (target_attr),
NULL_TREE, DECL_ATTRIBUTES (decl));
}
t = build_decl (DECL_SOURCE_LOCATION (decl),
RESULT_DECL, NULL_TREE, void_type_node);
DECL_ARTIFICIAL (t) = 1;
DECL_IGNORED_P (t) = 1;
DECL_CONTEXT (t) = decl;
DECL_RESULT (decl) = t;
tree data_name = get_identifier (".omp_data_i");
t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
ptr_type_node);
DECL_ARTIFICIAL (t) = 1;
DECL_NAMELESS (t) = 1;
DECL_ARG_TYPE (t) = ptr_type_node;
DECL_CONTEXT (t) = current_function_decl;
TREE_USED (t) = 1;
TREE_READONLY (t) = 1;
DECL_ARGUMENTS (decl) = t;
if (!task_copy)
ctx->receiver_decl = t;
else
{
t = build_decl (DECL_SOURCE_LOCATION (decl),
PARM_DECL, get_identifier (".omp_data_o"),
ptr_type_node);
DECL_ARTIFICIAL (t) = 1;
DECL_NAMELESS (t) = 1;
DECL_ARG_TYPE (t) = ptr_type_node;
DECL_CONTEXT (t) = current_function_decl;
TREE_USED (t) = 1;
TREE_ADDRESSABLE (t) = 1;
DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
DECL_ARGUMENTS (decl) = t;
}
/* Allocate memory for the function structure. The call to
push_struct_function clobbers CFUN, so we need to restore it
afterward with pop_cfun. */
push_struct_function (decl);
cfun->function_end_locus = gimple_location (ctx->stmt);
init_tree_ssa (cfun);
pop_cfun ();
}
/* Callback for walk_gimple_seq. Check if combined parallel
contains gimple_omp_for_combined_into_p OMP_FOR. */
tree
omp_find_combined_for (gimple_stmt_iterator *gsi_p,
bool *handled_ops_p,
struct walk_stmt_info *wi)
{
gimple *stmt = gsi_stmt (*gsi_p);
*handled_ops_p = true;
switch (gimple_code (stmt))
{
WALK_SUBSTMTS;
case GIMPLE_OMP_FOR:
if (gimple_omp_for_combined_into_p (stmt)
&& gimple_omp_for_kind (stmt)
== *(const enum gf_mask *) (wi->info))
{
wi->info = stmt;
return integer_zero_node;
}
break;
default:
break;
}
return NULL;
}
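/* Returning integer_zero_node above stops the walk early; on success
wi->info is repointed from the kind mask in the caller's stack slot to
the matching GIMPLE_OMP_FOR itself, which is how callers such as
add_taskreg_looptemp_clauses detect that a combined loop was found. */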
/* Add _LOOPTEMP_/_REDUCTEMP_ clauses on OpenMP parallel or task. */
static void
add_taskreg_looptemp_clauses (enum gf_mask msk, gimple *stmt,
omp_context *outer_ctx)
{
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
wi.val_only = true;
wi.info = (void *) &msk;
walk_gimple_seq (gimple_omp_body (stmt), omp_find_combined_for, NULL, &wi);
if (wi.info != (void *) &msk)
{
gomp_for *for_stmt = as_a <gomp_for *> ((gimple *) wi.info);
struct omp_for_data fd;
omp_extract_for_data (for_stmt, &fd, NULL);
/* We need two temporaries with fd.iter_type (istart/iend)
and then (fd.collapse - 1) temporaries with the same
type for count2 ... countN-1 vars if not constant. */
size_t count = 2, i;
tree type = fd.iter_type;
if (fd.collapse > 1
&& TREE_CODE (fd.loop.n2) != INTEGER_CST)
{
count += fd.collapse - 1;
/* If there are lastprivate clauses on the inner
GIMPLE_OMP_FOR, add one more temporary for the total number
of iterations (product of count1 ... countN-1). */
if (omp_find_clause (gimple_omp_for_clauses (for_stmt),
OMP_CLAUSE_LASTPRIVATE))
count++;
else if (msk == GF_OMP_FOR_KIND_FOR
&& omp_find_clause (gimple_omp_parallel_clauses (stmt),
OMP_CLAUSE_LASTPRIVATE))
count++;
}
for (i = 0; i < count; i++)
{
tree temp = create_tmp_var (type);
tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
insert_decl_map (&outer_ctx->cb, temp, temp);
OMP_CLAUSE_DECL (c) = temp;
OMP_CLAUSE_CHAIN (c) = gimple_omp_taskreg_clauses (stmt);
gimple_omp_taskreg_set_clauses (stmt, c);
}
}
if (msk == GF_OMP_FOR_KIND_TASKLOOP
&& omp_find_clause (gimple_omp_task_clauses (stmt),
OMP_CLAUSE_REDUCTION))
{
tree type = build_pointer_type (pointer_sized_int_node);
tree temp = create_tmp_var (type);
tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__REDUCTEMP_);
insert_decl_map (&outer_ctx->cb, temp, temp);
OMP_CLAUSE_DECL (c) = temp;
OMP_CLAUSE_CHAIN (c) = gimple_omp_task_clauses (stmt);
gimple_omp_task_set_clauses (stmt, c);
}
}
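/* Sketch of the effect on a combined construct (illustrative source, not
from this file):
#pragma omp parallel for
for (i = 0; i < n; i++) ...
The enclosing parallel acquires two _looptemp_ clauses (istart/iend
temporaries) through which the bounds are communicated to the nested
GIMPLE_OMP_FOR; non-constant collapsed iteration counts add more. */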
/* Scan an OpenMP parallel directive. */
static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
omp_context *ctx;
tree name;
gomp_parallel *stmt = as_a <gomp_parallel *> (gsi_stmt (*gsi));
/* Ignore parallel directives with empty bodies, unless there
are copyin clauses. */
if (optimize > 0
&& empty_body_p (gimple_omp_body (stmt))
&& omp_find_clause (gimple_omp_parallel_clauses (stmt),
OMP_CLAUSE_COPYIN) == NULL)
{
gsi_replace (gsi, gimple_build_nop (), false);
return;
}
if (gimple_omp_parallel_combined_p (stmt))
add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_FOR, stmt, outer_ctx);
for (tree c = omp_find_clause (gimple_omp_parallel_clauses (stmt),
OMP_CLAUSE_REDUCTION);
c; c = omp_find_clause (OMP_CLAUSE_CHAIN (c), OMP_CLAUSE_REDUCTION))
if (OMP_CLAUSE_REDUCTION_TASK (c))
{
tree type = build_pointer_type (pointer_sized_int_node);
tree temp = create_tmp_var (type);
tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__REDUCTEMP_);
if (outer_ctx)
insert_decl_map (&outer_ctx->cb, temp, temp);
OMP_CLAUSE_DECL (c) = temp;
OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
gimple_omp_parallel_set_clauses (stmt, c);
break;
}
else if (OMP_CLAUSE_CHAIN (c) == NULL_TREE)
break;
ctx = new_omp_context (stmt, outer_ctx);
taskreg_contexts.safe_push (ctx);
if (taskreg_nesting_level > 1)
ctx->is_nested = true;
ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
name = create_tmp_var_name (".omp_data_s");
name = build_decl (gimple_location (stmt),
TYPE_DECL, name, ctx->record_type);
DECL_ARTIFICIAL (name) = 1;
DECL_NAMELESS (name) = 1;
TYPE_NAME (ctx->record_type) = name;
TYPE_ARTIFICIAL (ctx->record_type) = 1;
if (!gimple_omp_parallel_grid_phony (stmt))
{
create_omp_child_function (ctx, false);
gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
}
scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
scan_omp (gimple_omp_body_ptr (stmt), ctx);
if (TYPE_FIELDS (ctx->record_type) == NULL)
ctx->record_type = ctx->receiver_decl = NULL;
}
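/* Two illustrative cases of the empty-body check above: with optimization
enabled,
#pragma omp parallel
;
is replaced by a nop here outright, while
#pragma omp parallel copyin(x)
;
survives, because the copyin still has an observable effect. */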
/* Scan an OpenMP task directive. */
static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
omp_context *ctx;
tree name, t;
gomp_task *stmt = as_a <gomp_task *> (gsi_stmt (*gsi));
/* Ignore task directives with empty bodies, unless they have a depend
clause. */
if (optimize > 0
&& gimple_omp_body (stmt)
&& empty_body_p (gimple_omp_body (stmt))
&& !omp_find_clause (gimple_omp_task_clauses (stmt), OMP_CLAUSE_DEPEND))
{
gsi_replace (gsi, gimple_build_nop (), false);
return;
}
if (gimple_omp_task_taskloop_p (stmt))
add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_TASKLOOP, stmt, outer_ctx);
ctx = new_omp_context (stmt, outer_ctx);
if (gimple_omp_task_taskwait_p (stmt))
{
scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
return;
}
taskreg_contexts.safe_push (ctx);
if (taskreg_nesting_level > 1)
ctx->is_nested = true;
ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
name = create_tmp_var_name (".omp_data_s");
name = build_decl (gimple_location (stmt),
TYPE_DECL, name, ctx->record_type);
DECL_ARTIFICIAL (name) = 1;
DECL_NAMELESS (name) = 1;
TYPE_NAME (ctx->record_type) = name;
TYPE_ARTIFICIAL (ctx->record_type) = 1;
create_omp_child_function (ctx, false);
gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
if (ctx->srecord_type)
{
name = create_tmp_var_name (".omp_data_a");
name = build_decl (gimple_location (stmt),
TYPE_DECL, name, ctx->srecord_type);
DECL_ARTIFICIAL (name) = 1;
DECL_NAMELESS (name) = 1;
TYPE_NAME (ctx->srecord_type) = name;
TYPE_ARTIFICIAL (ctx->srecord_type) = 1;
create_omp_child_function (ctx, true);
}
scan_omp (gimple_omp_body_ptr (stmt), ctx);
if (TYPE_FIELDS (ctx->record_type) == NULL)
{
ctx->record_type = ctx->receiver_decl = NULL;
t = build_int_cst (long_integer_type_node, 0);
gimple_omp_task_set_arg_size (stmt, t);
t = build_int_cst (long_integer_type_node, 1);
gimple_omp_task_set_arg_align (stmt, t);
}
}
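/* Analogously to parallel, an empty task body is normally elided, but e.g.
#pragma omp task depend(out: x)
;
must be kept: the depend clause still orders this task against its
siblings even though the body does nothing. */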
/* Helper function for finish_taskreg_scan, called through walk_tree.
If maybe_lookup_decl_in_outer_ctx returns non-NULL for some
tree, replace it in the expression. */
static tree
finish_taskreg_remap (tree *tp, int *walk_subtrees, void *data)
{
if (VAR_P (*tp))
{
omp_context *ctx = (omp_context *) data;
tree t = maybe_lookup_decl_in_outer_ctx (*tp, ctx);
if (t != *tp)
{
if (DECL_HAS_VALUE_EXPR_P (t))
t = unshare_expr (DECL_VALUE_EXPR (t));
*tp = t;
}
*walk_subtrees = 0;
}
else if (IS_TYPE_OR_DECL_P (*tp))
*walk_subtrees = 0;
return NULL_TREE;
}
/* If any decls have been made addressable during scan_omp,
adjust their fields if needed, and layout record types
of parallel/task constructs. */
static void
finish_taskreg_scan (omp_context *ctx)
{
if (ctx->record_type == NULL_TREE)
return;
/* If any task_shared_vars were needed, verify for all
OMP_CLAUSE_SHARED clauses on GIMPLE_OMP_{PARALLEL,TASK,TEAMS}
statements whether use_pointer_for_field has changed because
of that. If it did, update the field types now. */
if (task_shared_vars)
{
tree c;
for (c = gimple_omp_taskreg_clauses (ctx->stmt);
c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
&& !OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
{
tree decl = OMP_CLAUSE_DECL (c);
/* Global variables don't need to be copied,
the receiver side will use them directly. */
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
continue;
if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
|| !use_pointer_for_field (decl, ctx))
continue;
tree field = lookup_field (decl, ctx);
if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
&& TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
continue;
TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
TREE_THIS_VOLATILE (field) = 0;
DECL_USER_ALIGN (field) = 0;
SET_DECL_ALIGN (field, TYPE_ALIGN (TREE_TYPE (field)));
if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
SET_TYPE_ALIGN (ctx->record_type, DECL_ALIGN (field));
if (ctx->srecord_type)
{
tree sfield = lookup_sfield (decl, ctx);
TREE_TYPE (sfield) = TREE_TYPE (field);
TREE_THIS_VOLATILE (sfield) = 0;
DECL_USER_ALIGN (sfield) = 0;
SET_DECL_ALIGN (sfield, DECL_ALIGN (field));
if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
SET_TYPE_ALIGN (ctx->srecord_type, DECL_ALIGN (sfield));
}
}
}
if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
{
tree clauses = gimple_omp_parallel_clauses (ctx->stmt);
tree c = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_);
if (c)
{
/* Move the _reductemp_ clause first. GOMP_parallel_reductions
expects to find it at the start of data. */
tree f = lookup_field (OMP_CLAUSE_DECL (c), ctx);
tree *p = &TYPE_FIELDS (ctx->record_type);
while (*p)
if (*p == f)
{
*p = DECL_CHAIN (*p);
break;
}
else
p = &DECL_CHAIN (*p);
DECL_CHAIN (f) = TYPE_FIELDS (ctx->record_type);
TYPE_FIELDS (ctx->record_type) = f;
}
layout_type (ctx->record_type);
fixup_child_record_type (ctx);
}
else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
{
layout_type (ctx->record_type);
fixup_child_record_type (ctx);
}
else
{
location_t loc = gimple_location (ctx->stmt);
tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
/* Move VLA fields to the end. */
p = &TYPE_FIELDS (ctx->record_type);
while (*p)
if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
|| !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
{
*q = *p;
*p = TREE_CHAIN (*p);
TREE_CHAIN (*q) = NULL_TREE;
q = &TREE_CHAIN (*q);
}
else
p = &DECL_CHAIN (*p);
*p = vla_fields;
if (gimple_omp_task_taskloop_p (ctx->stmt))
{
/* Move fields corresponding to the first and second _looptemp_
clause first. These are filled in by GOMP_taskloop
and thus need to be in specific positions. */
tree clauses = gimple_omp_task_clauses (ctx->stmt);
tree c1 = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
tree c2 = omp_find_clause (OMP_CLAUSE_CHAIN (c1),
OMP_CLAUSE__LOOPTEMP_);
tree c3 = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_);
tree f1 = lookup_field (OMP_CLAUSE_DECL (c1), ctx);
tree f2 = lookup_field (OMP_CLAUSE_DECL (c2), ctx);
tree f3 = c3 ? lookup_field (OMP_CLAUSE_DECL (c3), ctx) : NULL_TREE;
p = &TYPE_FIELDS (ctx->record_type);
while (*p)
if (*p == f1 || *p == f2 || *p == f3)
*p = DECL_CHAIN (*p);
else
p = &DECL_CHAIN (*p);
DECL_CHAIN (f1) = f2;
if (c3)
{
DECL_CHAIN (f2) = f3;
DECL_CHAIN (f3) = TYPE_FIELDS (ctx->record_type);
}
else
DECL_CHAIN (f2) = TYPE_FIELDS (ctx->record_type);
TYPE_FIELDS (ctx->record_type) = f1;
if (ctx->srecord_type)
{
f1 = lookup_sfield (OMP_CLAUSE_DECL (c1), ctx);
f2 = lookup_sfield (OMP_CLAUSE_DECL (c2), ctx);
if (c3)
f3 = lookup_sfield (OMP_CLAUSE_DECL (c3), ctx);
p = &TYPE_FIELDS (ctx->srecord_type);
while (*p)
if (*p == f1 || *p == f2 || *p == f3)
*p = DECL_CHAIN (*p);
else
p = &DECL_CHAIN (*p);
DECL_CHAIN (f1) = f2;
DECL_CHAIN (f2) = TYPE_FIELDS (ctx->srecord_type);
if (c3)
{
DECL_CHAIN (f2) = f3;
DECL_CHAIN (f3) = TYPE_FIELDS (ctx->srecord_type);
}
else
DECL_CHAIN (f2) = TYPE_FIELDS (ctx->srecord_type);
TYPE_FIELDS (ctx->srecord_type) = f1;
}
}
layout_type (ctx->record_type);
fixup_child_record_type (ctx);
if (ctx->srecord_type)
layout_type (ctx->srecord_type);
tree t = fold_convert_loc (loc, long_integer_type_node,
TYPE_SIZE_UNIT (ctx->record_type));
if (TREE_CODE (t) != INTEGER_CST)
{
t = unshare_expr (t);
walk_tree (&t, finish_taskreg_remap, ctx, NULL);
}
gimple_omp_task_set_arg_size (ctx->stmt, t);
t = build_int_cst (long_integer_type_node,
TYPE_ALIGN_UNIT (ctx->record_type));
gimple_omp_task_set_arg_align (ctx->stmt, t);
}
}
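/* The field reordering above matters because the runtime addresses parts
of the argument block positionally. As a rough sketch of the taskloop
case, the final record layout is:
.omp_data_s = { looptemp1, looptemp2 [, reductemp], <other fields>,
<VLA-sized fields last> }
with GOMP_taskloop filling in the leading temporaries. */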
/* Find the enclosing offload context. */
static omp_context *
enclosing_target_ctx (omp_context *ctx)
{
for (; ctx; ctx = ctx->outer)
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET)
break;
return ctx;
}
/* Return true if ctx is part of an oacc kernels region. */
static bool
ctx_in_oacc_kernels_region (omp_context *ctx)
{
for (; ctx != NULL; ctx = ctx->outer)
{
gimple *stmt = ctx->stmt;
if (gimple_code (stmt) == GIMPLE_OMP_TARGET
&& gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_OACC_KERNELS)
return true;
}
return false;
}
/* Check the parallelism clauses inside a kernels region.
Until kernels handling moves to use the same loop indirection
scheme as parallel, we need to do this checking early. */
static unsigned
check_oacc_kernel_gwv (gomp_for *stmt, omp_context *ctx)
{
bool checking = true;
unsigned outer_mask = 0;
unsigned this_mask = 0;
bool has_seq = false, has_auto = false;
if (ctx->outer)
outer_mask = check_oacc_kernel_gwv (NULL, ctx->outer);
if (!stmt)
{
checking = false;
if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR)
return outer_mask;
stmt = as_a <gomp_for *> (ctx->stmt);
}
for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
{
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_GANG:
this_mask |= GOMP_DIM_MASK (GOMP_DIM_GANG);
break;
case OMP_CLAUSE_WORKER:
this_mask |= GOMP_DIM_MASK (GOMP_DIM_WORKER);
break;
case OMP_CLAUSE_VECTOR:
this_mask |= GOMP_DIM_MASK (GOMP_DIM_VECTOR);
break;
case OMP_CLAUSE_SEQ:
has_seq = true;
break;
case OMP_CLAUSE_AUTO:
has_auto = true;
break;
default:
break;
}
}
if (checking)
{
if (has_seq && (this_mask || has_auto))
error_at (gimple_location (stmt), "%<seq%> overrides other"
" OpenACC loop specifiers");
else if (has_auto && this_mask)
error_at (gimple_location (stmt), "%<auto%> conflicts with other"
" OpenACC loop specifiers");
if (this_mask & outer_mask)
error_at (gimple_location (stmt), "inner loop uses same"
" OpenACC parallelism as containing loop");
}
return outer_mask | this_mask;
}
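/* An illustrative rejection inside a kernels region (assumed source):
#pragma acc loop gang
for (...)
#pragma acc loop gang
for (...)
The inner loop's gang bit is already set in outer_mask, so the
"inner loop uses same OpenACC parallelism" error above triggers. */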
/* Scan a GIMPLE_OMP_FOR. */
static omp_context *
scan_omp_for (gomp_for *stmt, omp_context *outer_ctx)
{
omp_context *ctx;
size_t i;
tree clauses = gimple_omp_for_clauses (stmt);
ctx = new_omp_context (stmt, outer_ctx);
if (is_gimple_omp_oacc (stmt))
{
omp_context *tgt = enclosing_target_ctx (outer_ctx);
if (!(tgt && is_oacc_kernels (tgt)))
for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
{
tree c_op0;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_GANG:
c_op0 = OMP_CLAUSE_GANG_EXPR (c);
break;
case OMP_CLAUSE_WORKER:
c_op0 = OMP_CLAUSE_WORKER_EXPR (c);
break;
case OMP_CLAUSE_VECTOR:
c_op0 = OMP_CLAUSE_VECTOR_EXPR (c);
break;
default:
continue;
}
if (c_op0)
{
error_at (OMP_CLAUSE_LOCATION (c),
"argument not permitted on %qs clause",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
if (tgt)
inform (gimple_location (tgt->stmt),
"enclosing parent compute construct");
else if (oacc_get_fn_attrib (current_function_decl))
inform (DECL_SOURCE_LOCATION (current_function_decl),
"enclosing routine");
else
gcc_unreachable ();
}
}
if (tgt && is_oacc_kernels (tgt))
check_oacc_kernel_gwv (stmt, ctx);
/* Collect all variables named in reductions on this loop. Ensure
that, if this loop has a reduction on some variable v, and there is
a reduction on v somewhere in an outer context, then there is a
reduction on v on all intervening loops as well. */
tree local_reduction_clauses = NULL;
for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
local_reduction_clauses
= tree_cons (NULL, c, local_reduction_clauses);
}
if (ctx->outer_reduction_clauses == NULL && ctx->outer != NULL)
ctx->outer_reduction_clauses
= chainon (unshare_expr (ctx->outer->local_reduction_clauses),
ctx->outer->outer_reduction_clauses);
tree outer_reduction_clauses = ctx->outer_reduction_clauses;
tree local_iter = local_reduction_clauses;
for (; local_iter; local_iter = TREE_CHAIN (local_iter))
{
tree local_clause = TREE_VALUE (local_iter);
tree local_var = OMP_CLAUSE_DECL (local_clause);
tree_code local_op = OMP_CLAUSE_REDUCTION_CODE (local_clause);
bool have_outer_reduction = false;
tree ctx_iter = outer_reduction_clauses;
for (; ctx_iter; ctx_iter = TREE_CHAIN (ctx_iter))
{
tree outer_clause = TREE_VALUE (ctx_iter);
tree outer_var = OMP_CLAUSE_DECL (outer_clause);
tree_code outer_op = OMP_CLAUSE_REDUCTION_CODE (outer_clause);
if (outer_var == local_var && outer_op != local_op)
{
warning_at (OMP_CLAUSE_LOCATION (local_clause), 0,
"conflicting reduction operations for %qE",
local_var);
inform (OMP_CLAUSE_LOCATION (outer_clause),
"location of the previous reduction for %qE",
outer_var);
}
if (outer_var == local_var)
{
have_outer_reduction = true;
break;
}
}
if (have_outer_reduction)
{
/* There is a reduction on outer_var both on this loop and on
some enclosing loop. Walk up the context tree until such a
loop with a reduction on outer_var is found, and complain
about all intervening loops that do not have such a
reduction. */
struct omp_context *curr_loop = ctx->outer;
bool found = false;
while (curr_loop != NULL)
{
tree curr_iter = curr_loop->local_reduction_clauses;
for (; curr_iter; curr_iter = TREE_CHAIN (curr_iter))
{
tree curr_clause = TREE_VALUE (curr_iter);
tree curr_var = OMP_CLAUSE_DECL (curr_clause);
if (curr_var == local_var)
{
found = true;
break;
}
}
if (!found)
warning_at (gimple_location (curr_loop->stmt), 0,
"nested loop in reduction needs "
"reduction clause for %qE",
local_var);
else
break;
curr_loop = curr_loop->outer;
}
}
}
ctx->local_reduction_clauses = local_reduction_clauses;
ctx->outer_reduction_clauses
= chainon (unshare_expr (ctx->local_reduction_clauses),
ctx->outer_reduction_clauses);
if (tgt && is_oacc_kernels (tgt))
{
/* Strip out reductions, as they are not handled yet. */
tree *prev_ptr = &clauses;
while (tree probe = *prev_ptr)
{
tree *next_ptr = &OMP_CLAUSE_CHAIN (probe);
if (OMP_CLAUSE_CODE (probe) == OMP_CLAUSE_REDUCTION)
*prev_ptr = *next_ptr;
else
prev_ptr = next_ptr;
}
gimple_omp_for_set_clauses (stmt, clauses);
}
}
scan_sharing_clauses (clauses, ctx);
scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
{
scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
}
scan_omp (gimple_omp_body_ptr (stmt), ctx);
return ctx;
}
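/* An illustrative OpenACC nest exercising the reduction checks above
(assumed source):
#pragma acc loop reduction(+:v)
for (...)
#pragma acc loop
for (...)
#pragma acc loop reduction(+:v)
for (...)
The innermost loop sees a reduction on v in an outer context, and the
intervening loop lacks one, so that loop gets the "nested loop in
reduction needs reduction clause" warning. */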
/* Duplicate #pragma omp simd, one for SIMT, another one for SIMD. */
static void
scan_omp_simd (gimple_stmt_iterator *gsi, gomp_for *stmt,
omp_context *outer_ctx)
{
gbind *bind = gimple_build_bind (NULL, NULL, NULL);
gsi_replace (gsi, bind, false);
gimple_seq seq = NULL;
gimple *g = gimple_build_call_internal (IFN_GOMP_USE_SIMT, 0);
tree cond = create_tmp_var_raw (integer_type_node);
DECL_CONTEXT (cond) = current_function_decl;
DECL_SEEN_IN_BIND_EXPR_P (cond) = 1;
gimple_bind_set_vars (bind, cond);
gimple_call_set_lhs (g, cond);
gimple_seq_add_stmt (&seq, g);
tree lab1 = create_artificial_label (UNKNOWN_LOCATION);
tree lab2 = create_artificial_label (UNKNOWN_LOCATION);
tree lab3 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (NE_EXPR, cond, integer_zero_node, lab1, lab2);
gimple_seq_add_stmt (&seq, g);
g = gimple_build_label (lab1);
gimple_seq_add_stmt (&seq, g);
gimple_seq new_seq = copy_gimple_seq_and_replace_locals (stmt);
gomp_for *new_stmt = as_a <gomp_for *> (new_seq);
tree clause = build_omp_clause (gimple_location (stmt), OMP_CLAUSE__SIMT_);
OMP_CLAUSE_CHAIN (clause) = gimple_omp_for_clauses (new_stmt);
gimple_omp_for_set_clauses (new_stmt, clause);
gimple_seq_add_stmt (&seq, new_stmt);
g = gimple_build_goto (lab3);
gimple_seq_add_stmt (&seq, g);
g = gimple_build_label (lab2);
gimple_seq_add_stmt (&seq, g);
gimple_seq_add_stmt (&seq, stmt);
g = gimple_build_label (lab3);
gimple_seq_add_stmt (&seq, g);
gimple_bind_set_body (bind, seq);
update_stmt (bind);
scan_omp_for (new_stmt, outer_ctx);
scan_omp_for (stmt, outer_ctx)->simt_stmt = new_stmt;
}
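/* The generated shape is roughly:
cond = IFN_GOMP_USE_SIMT ();
if (cond != 0) goto lab1; else goto lab2;
lab1: <copy of the loop, clause list prefixed with _simt_>; goto lab3;
lab2: <original loop>;
lab3:
so the SIMT variant is selected at run time on offload targets that
support it, and the plain SIMD variant is used otherwise. */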
static tree omp_find_scan (gimple_stmt_iterator *, bool *,
struct walk_stmt_info *);
static omp_context *maybe_lookup_ctx (gimple *);
/* Duplicate #pragma omp simd, one for the scan input phase loop and one
for the scan phase loop. */
static void
scan_omp_simd_scan (gimple_stmt_iterator *gsi, gomp_for *stmt,
omp_context *outer_ctx)
{
/* The only change between inclusive and exclusive scan will be
within the first simd loop, so just use inclusive in the
worksharing loop. */
outer_ctx->scan_inclusive = true;
tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_INCLUSIVE);
OMP_CLAUSE_DECL (c) = integer_zero_node;
gomp_scan *input_stmt = gimple_build_omp_scan (NULL, NULL_TREE);
gomp_scan *scan_stmt = gimple_build_omp_scan (NULL, c);
gsi_replace (gsi, input_stmt, false);
gimple_seq input_body = NULL;
gimple_seq_add_stmt (&input_body, stmt);
gsi_insert_after (gsi, scan_stmt, GSI_NEW_STMT);
gimple_stmt_iterator input1_gsi = gsi_none ();
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
wi.val_only = true;
wi.info = (void *) &input1_gsi;
walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), omp_find_scan, NULL, &wi);
gcc_assert (!gsi_end_p (input1_gsi));
gimple *input_stmt1 = gsi_stmt (input1_gsi);
gsi_next (&input1_gsi);
gimple *scan_stmt1 = gsi_stmt (input1_gsi);
gcc_assert (scan_stmt1 && gimple_code (scan_stmt1) == GIMPLE_OMP_SCAN);
c = gimple_omp_scan_clauses (as_a <gomp_scan *> (scan_stmt1));
if (c && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_EXCLUSIVE)
std::swap (input_stmt1, scan_stmt1);
gimple_seq input_body1 = gimple_omp_body (input_stmt1);
gimple_omp_set_body (input_stmt1, NULL);
gimple_seq scan_body = copy_gimple_seq_and_replace_locals (stmt);
gomp_for *new_stmt = as_a <gomp_for *> (scan_body);
gimple_omp_set_body (input_stmt1, input_body1);
gimple_omp_set_body (scan_stmt1, NULL);
gimple_stmt_iterator input2_gsi = gsi_none ();
memset (&wi, 0, sizeof (wi));
wi.val_only = true;
wi.info = (void *) &input2_gsi;
walk_gimple_seq_mod (gimple_omp_body_ptr (new_stmt), omp_find_scan,
NULL, &wi);
gcc_assert (!gsi_end_p (input2_gsi));
gimple *input_stmt2 = gsi_stmt (input2_gsi);
gsi_next (&input2_gsi);
gimple *scan_stmt2 = gsi_stmt (input2_gsi);
gcc_assert (scan_stmt2 && gimple_code (scan_stmt2) == GIMPLE_OMP_SCAN);
if (c && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_EXCLUSIVE)
std::swap (input_stmt2, scan_stmt2);
gimple_omp_set_body (input_stmt2, NULL);
gimple_omp_set_body (input_stmt, input_body);
gimple_omp_set_body (scan_stmt, scan_body);
omp_context *ctx = new_omp_context (input_stmt, outer_ctx);
scan_omp (gimple_omp_body_ptr (input_stmt), ctx);
ctx = new_omp_context (scan_stmt, outer_ctx);
scan_omp (gimple_omp_body_ptr (scan_stmt), ctx);
maybe_lookup_ctx (new_stmt)->for_simd_scan_phase = true;
}
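/* After this transformation the worksharing construct contains, in order
(sketch):
GIMPLE_OMP_SCAN <no clause>      -- input phase: the original simd loop
GIMPLE_OMP_SCAN inclusive(0)     -- scan phase: the copied simd loop
where the copy's context is flagged for_simd_scan_phase so that later
lowering can treat the two loops differently. */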
/* Scan an OpenMP sections directive. */
static void
scan_omp_sections (gomp_sections *stmt, omp_context *outer_ctx)
{
omp_context *ctx;
ctx = new_omp_context (stmt, outer_ctx);
scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
scan_omp (gimple_omp_body_ptr (stmt), ctx);
}
/* Scan an OpenMP single directive. */
static void
scan_omp_single (gomp_single *stmt, omp_context *outer_ctx)
{
omp_context *ctx;
tree name;
ctx = new_omp_context (stmt, outer_ctx);
ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
name = create_tmp_var_name (".omp_copy_s");
name = build_decl (gimple_location (stmt),
TYPE_DECL, name, ctx->record_type);
TYPE_NAME (ctx->record_type) = name;
scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
scan_omp (gimple_omp_body_ptr (stmt), ctx);
if (TYPE_FIELDS (ctx->record_type) == NULL)
ctx->record_type = NULL;
else
layout_type (ctx->record_type);
}
/* Scan a GIMPLE_OMP_TARGET. */
static void
scan_omp_target (gomp_target *stmt, omp_context *outer_ctx)
{
omp_context *ctx;
tree name;
bool offloaded = is_gimple_omp_offloaded (stmt);
tree clauses = gimple_omp_target_clauses (stmt);
ctx = new_omp_context (stmt, outer_ctx);
ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
name = create_tmp_var_name (".omp_data_t");
name = build_decl (gimple_location (stmt),
TYPE_DECL, name, ctx->record_type);
DECL_ARTIFICIAL (name) = 1;
DECL_NAMELESS (name) = 1;
TYPE_NAME (ctx->record_type) = name;
TYPE_ARTIFICIAL (ctx->record_type) = 1;
if (offloaded)
{
create_omp_child_function (ctx, false);
gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
}
scan_sharing_clauses (clauses, ctx);
scan_omp (gimple_omp_body_ptr (stmt), ctx);
if (TYPE_FIELDS (ctx->record_type) == NULL)
ctx->record_type = ctx->receiver_decl = NULL;
else
{
TYPE_FIELDS (ctx->record_type)
= nreverse (TYPE_FIELDS (ctx->record_type));
if (flag_checking)
{
unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
for (tree field = TYPE_FIELDS (ctx->record_type);
field;
field = DECL_CHAIN (field))
gcc_assert (DECL_ALIGN (field) == align);
}
layout_type (ctx->record_type);
if (offloaded)
fixup_child_record_type (ctx);
}
}
/* Scan an OpenMP teams directive. */
static void
scan_omp_teams (gomp_teams *stmt, omp_context *outer_ctx)
{
omp_context *ctx = new_omp_context (stmt, outer_ctx);
if (!gimple_omp_teams_host (stmt))
{
scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
scan_omp (gimple_omp_body_ptr (stmt), ctx);
return;
}
taskreg_contexts.safe_push (ctx);
gcc_assert (taskreg_nesting_level == 1);
ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
tree name = create_tmp_var_name (".omp_data_s");
name = build_decl (gimple_location (stmt),
TYPE_DECL, name, ctx->record_type);
DECL_ARTIFICIAL (name) = 1;
DECL_NAMELESS (name) = 1;
TYPE_NAME (ctx->record_type) = name;
TYPE_ARTIFICIAL (ctx->record_type) = 1;
create_omp_child_function (ctx, false);
gimple_omp_teams_set_child_fn (stmt, ctx->cb.dst_fn);
scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
scan_omp (gimple_omp_body_ptr (stmt), ctx);
if (TYPE_FIELDS (ctx->record_type) == NULL)
ctx->record_type = ctx->receiver_decl = NULL;
}
/* Check nesting restrictions. */
static bool
check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
{
tree c;
if (ctx && gimple_code (ctx->stmt) == GIMPLE_OMP_GRID_BODY)
/* GRID_BODY is an artificial construct, nesting rules will be checked in
the original copy of its contents. */
return true;
/* No nesting of non-OpenACC STMT (that is, an OpenMP one, or a GOMP builtin)
inside an OpenACC CTX. */
if (!(is_gimple_omp (stmt)
&& is_gimple_omp_oacc (stmt))
/* Except for atomic codes that we share with OpenMP. */
&& !(gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD
|| gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE))
{
if (oacc_get_fn_attrib (cfun->decl) != NULL)
{
error_at (gimple_location (stmt),
"non-OpenACC construct inside of OpenACC routine");
return false;
}
else
for (omp_context *octx = ctx; octx != NULL; octx = octx->outer)
if (is_gimple_omp (octx->stmt)
&& is_gimple_omp_oacc (octx->stmt))
{
error_at (gimple_location (stmt),
"non-OpenACC construct inside of OpenACC region");
return false;
}
}
if (ctx != NULL)
{
if (gimple_code (ctx->stmt) == GIMPLE_OMP_SCAN
&& ctx->outer
&& gimple_code (ctx->outer->stmt) == GIMPLE_OMP_FOR)
ctx = ctx->outer;
if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD
&& !ctx->loop_p)
{
c = NULL_TREE;
if (ctx->order_concurrent
&& (gimple_code (stmt) == GIMPLE_OMP_ORDERED
|| gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD
|| gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE))
{
error_at (gimple_location (stmt),
"OpenMP constructs other than %<parallel%>, %<loop%>"
" or %<simd%> may not be nested inside a region with"
" the %<order(concurrent)%> clause");
return false;
}
if (gimple_code (stmt) == GIMPLE_OMP_ORDERED)
{
c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
if (omp_find_clause (c, OMP_CLAUSE_SIMD))
{
if (omp_find_clause (c, OMP_CLAUSE_THREADS)
&& (ctx->outer == NULL
|| !gimple_omp_for_combined_into_p (ctx->stmt)
|| gimple_code (ctx->outer->stmt) != GIMPLE_OMP_FOR
|| (gimple_omp_for_kind (ctx->outer->stmt)
!= GF_OMP_FOR_KIND_FOR)
|| !gimple_omp_for_combined_p (ctx->outer->stmt)))
{
error_at (gimple_location (stmt),
"%<ordered simd threads%> must be closely "
"nested inside of %<for simd%> region");
return false;
}
return true;
}
}
else if (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD
|| gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE
|| gimple_code (stmt) == GIMPLE_OMP_SCAN)
return true;
else if (gimple_code (stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
return true;
error_at (gimple_location (stmt),
"OpenMP constructs other than "
"%<ordered simd%>, %<simd%>, %<loop%> or %<atomic%> may "
"not be nested inside %<simd%> region");
return false;
}
else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
{
if ((gimple_code (stmt) != GIMPLE_OMP_FOR
|| (gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_DISTRIBUTE
&& gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_GRID_LOOP
&& omp_find_clause (gimple_omp_for_clauses (stmt),
OMP_CLAUSE_BIND) == NULL_TREE))
&& gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
{
error_at (gimple_location (stmt),
"only %<distribute%>, %<parallel%> or %<loop%> "
"regions are allowed to be strictly nested inside "
"%<teams%> region");
return false;
}
}
else if (ctx->order_concurrent
&& gimple_code (stmt) != GIMPLE_OMP_PARALLEL
&& (gimple_code (stmt) != GIMPLE_OMP_FOR
|| gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_SIMD)
&& gimple_code (stmt) != GIMPLE_OMP_SCAN)
{
if (ctx->loop_p)
error_at (gimple_location (stmt),
"OpenMP constructs other than %<parallel%>, %<loop%> or "
"%<simd%> may not be nested inside a %<loop%> region");
else
error_at (gimple_location (stmt),
"OpenMP constructs other than %<parallel%>, %<loop%> or "
"%<simd%> may not be nested inside a region with "
"the %<order(concurrent)%> clause");
return false;
}
}
switch (gimple_code (stmt))
{
case GIMPLE_OMP_FOR:
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_SIMD)
return true;
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
{
if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
{
error_at (gimple_location (stmt),
"%<distribute%> region must be strictly nested "
"inside %<teams%> construct");
return false;
}
return true;
}
/* We split taskloop into task and nested taskloop in it. */
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_TASKLOOP)
return true;
/* For now, hope this will change and that loop bind(parallel) will not
be allowed in lots of contexts. */
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR
&& omp_find_clause (gimple_omp_for_clauses (stmt), OMP_CLAUSE_BIND))
return true;
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
{
bool ok = false;
if (ctx)
switch (gimple_code (ctx->stmt))
{
case GIMPLE_OMP_FOR:
ok = (gimple_omp_for_kind (ctx->stmt)
== GF_OMP_FOR_KIND_OACC_LOOP);
break;
case GIMPLE_OMP_TARGET:
switch (gimple_omp_target_kind (ctx->stmt))
{
case GF_OMP_TARGET_KIND_OACC_PARALLEL:
case GF_OMP_TARGET_KIND_OACC_KERNELS:
case GF_OMP_TARGET_KIND_OACC_SERIAL:
ok = true;
break;
default:
break;
}
default:
break;
}
else if (oacc_get_fn_attrib (current_function_decl))
ok = true;
if (!ok)
{
error_at (gimple_location (stmt),
"OpenACC loop directive must be associated with"
" an OpenACC compute region");
return false;
}
}
/* FALLTHRU */
case GIMPLE_CALL:
if (is_gimple_call (stmt)
&& (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL
|| DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCELLATION_POINT))
{
const char *bad = NULL;
const char *kind = NULL;
const char *construct
= (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL)
? "cancel"
: "cancellation point";
if (ctx == NULL)
{
error_at (gimple_location (stmt), "orphaned %qs construct",
construct);
return false;
}
switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
? tree_to_shwi (gimple_call_arg (stmt, 0))
: 0)
{
case 1:
if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
bad = "parallel";
else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL
&& !integer_zerop (gimple_call_arg (stmt, 1)))
ctx->cancellable = true;
kind = "parallel";
break;
case 2:
if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
|| gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
bad = "for";
else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL
&& !integer_zerop (gimple_call_arg (stmt, 1)))
{
ctx->cancellable = true;
if (omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_NOWAIT))
warning_at (gimple_location (stmt), 0,
"%<cancel for%> inside "
"%<nowait%> for construct");
if (omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_ORDERED))
warning_at (gimple_location (stmt), 0,
"%<cancel for%> inside "
"%<ordered%> for construct");
}
kind = "for";
break;
case 4:
if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
&& gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
bad = "sections";
else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL
&& !integer_zerop (gimple_call_arg (stmt, 1)))
{
if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
{
ctx->cancellable = true;
if (omp_find_clause (gimple_omp_sections_clauses
(ctx->stmt),
OMP_CLAUSE_NOWAIT))
warning_at (gimple_location (stmt), 0,
"%<cancel sections%> inside "
"%<nowait%> sections construct");
}
else
{
gcc_assert (ctx->outer
&& gimple_code (ctx->outer->stmt)
== GIMPLE_OMP_SECTIONS);
ctx->outer->cancellable = true;
if (omp_find_clause (gimple_omp_sections_clauses
(ctx->outer->stmt),
OMP_CLAUSE_NOWAIT))
warning_at (gimple_location (stmt), 0,
"%<cancel sections%> inside "
"%<nowait%> sections construct");
}
}
kind = "sections";
break;
case 8:
if (!is_task_ctx (ctx)
&& (!is_taskloop_ctx (ctx)
|| ctx->outer == NULL
|| !is_task_ctx (ctx->outer)))
bad = "task";
else
{
for (omp_context *octx = ctx->outer;
octx; octx = octx->outer)
{
switch (gimple_code (octx->stmt))
{
case GIMPLE_OMP_TASKGROUP:
break;
case GIMPLE_OMP_TARGET:
if (gimple_omp_target_kind (octx->stmt)
!= GF_OMP_TARGET_KIND_REGION)
continue;
/* FALLTHRU */
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TEAMS:
error_at (gimple_location (stmt),
"%<%s taskgroup%> construct not closely "
"nested inside of %<taskgroup%> region",
construct);
return false;
case GIMPLE_OMP_TASK:
if (gimple_omp_task_taskloop_p (octx->stmt)
&& octx->outer
&& is_taskloop_ctx (octx->outer))
{
tree clauses
= gimple_omp_for_clauses (octx->outer->stmt);
if (!omp_find_clause (clauses, OMP_CLAUSE_NOGROUP))
break;
}
continue;
default:
continue;
}
break;
}
ctx->cancellable = true;
}
kind = "taskgroup";
break;
default:
error_at (gimple_location (stmt), "invalid arguments");
return false;
}
if (bad)
{
error_at (gimple_location (stmt),
"%<%s %s%> construct not closely nested inside of %qs",
construct, kind, bad);
return false;
}
}
/* FALLTHRU */
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
for (; ctx != NULL; ctx = ctx->outer)
switch (gimple_code (ctx->stmt))
{
case GIMPLE_OMP_FOR:
if (gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR
&& gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_TASKLOOP)
break;
/* FALLTHRU */
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_CRITICAL:
if (is_gimple_call (stmt))
{
if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
!= BUILT_IN_GOMP_BARRIER)
return true;
error_at (gimple_location (stmt),
"barrier region may not be closely nested inside "
"of work-sharing, %<loop%>, %<critical%>, "
"%<ordered%>, %<master%>, explicit %<task%> or "
"%<taskloop%> region");
return false;
}
error_at (gimple_location (stmt),
"work-sharing region may not be closely nested inside "
"of work-sharing, %<loop%>, %<critical%>, %<ordered%>, "
"%<master%>, explicit %<task%> or %<taskloop%> region");
return false;
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TEAMS:
return true;
case GIMPLE_OMP_TARGET:
if (gimple_omp_target_kind (ctx->stmt)
== GF_OMP_TARGET_KIND_REGION)
return true;
break;
default:
break;
}
break;
case GIMPLE_OMP_MASTER:
for (; ctx != NULL; ctx = ctx->outer)
switch (gimple_code (ctx->stmt))
{
case GIMPLE_OMP_FOR:
if (gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR
&& gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_TASKLOOP)
break;
/* FALLTHRU */
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_TASK:
error_at (gimple_location (stmt),
"%<master%> region may not be closely nested inside "
"of work-sharing, %<loop%>, explicit %<task%> or "
"%<taskloop%> region");
return false;
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TEAMS:
return true;
case GIMPLE_OMP_TARGET:
if (gimple_omp_target_kind (ctx->stmt)
== GF_OMP_TARGET_KIND_REGION)
return true;
break;
default:
break;
}
break;
case GIMPLE_OMP_TASK:
for (c = gimple_omp_task_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
&& (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE
|| OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK))
{
enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
error_at (OMP_CLAUSE_LOCATION (c),
"%<depend(%s)%> is only allowed in %<omp ordered%>",
kind == OMP_CLAUSE_DEPEND_SOURCE ? "source" : "sink");
return false;
}
break;
case GIMPLE_OMP_ORDERED:
for (c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
c; c = OMP_CLAUSE_CHAIN (c))
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
{
gcc_assert (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREADS
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SIMD);
continue;
}
enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
if (kind == OMP_CLAUSE_DEPEND_SOURCE
|| kind == OMP_CLAUSE_DEPEND_SINK)
{
tree oclause;
/* Look for containing ordered(N) loop. */
if (ctx == NULL
|| gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
|| (oclause
= omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_ORDERED)) == NULL_TREE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<ordered%> construct with %<depend%> clause "
"must be closely nested inside an %<ordered%> "
"loop");
return false;
}
else if (OMP_CLAUSE_ORDERED_EXPR (oclause) == NULL_TREE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<ordered%> construct with %<depend%> clause "
"must be closely nested inside a loop with "
"%<ordered%> clause with a parameter");
return false;
}
}
else
{
error_at (OMP_CLAUSE_LOCATION (c),
"invalid depend kind in omp %<ordered%> %<depend%>");
return false;
}
}
c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
if (omp_find_clause (c, OMP_CLAUSE_SIMD))
{
/* ordered simd must be closely nested inside of a simd region, and
a simd region must not encounter constructs other than
ordered simd, therefore ordered simd may be either orphaned,
or ctx->stmt must be simd. The latter case has already been
handled earlier. */
if (ctx != NULL)
{
error_at (gimple_location (stmt),
"%<ordered%> %<simd%> must be closely nested inside "
"%<simd%> region");
return false;
}
}
for (; ctx != NULL; ctx = ctx->outer)
switch (gimple_code (ctx->stmt))
{
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_ORDERED:
ordered_in_taskloop:
error_at (gimple_location (stmt),
"%<ordered%> region may not be closely nested inside "
"of %<critical%>, %<ordered%>, explicit %<task%> or "
"%<taskloop%> region");
return false;
case GIMPLE_OMP_FOR:
if (gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_TASKLOOP)
goto ordered_in_taskloop;
tree o;
o = omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_ORDERED);
if (o == NULL)
{
error_at (gimple_location (stmt),
"%<ordered%> region must be closely nested inside "
"a loop region with an %<ordered%> clause");
return false;
}
if (OMP_CLAUSE_ORDERED_EXPR (o) != NULL_TREE
&& omp_find_clause (c, OMP_CLAUSE_DEPEND) == NULL_TREE)
{
error_at (gimple_location (stmt),
"%<ordered%> region without %<depend%> clause may "
"not be closely nested inside a loop region with "
"an %<ordered%> clause with a parameter");
return false;
}
return true;
case GIMPLE_OMP_TARGET:
if (gimple_omp_target_kind (ctx->stmt)
!= GF_OMP_TARGET_KIND_REGION)
break;
/* FALLTHRU */
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TEAMS:
error_at (gimple_location (stmt),
"%<ordered%> region must be closely nested inside "
"a loop region with an %<ordered%> clause");
return false;
default:
break;
}
break;
case GIMPLE_OMP_CRITICAL:
{
tree this_stmt_name
= gimple_omp_critical_name (as_a <gomp_critical *> (stmt));
for (; ctx != NULL; ctx = ctx->outer)
if (gomp_critical *other_crit
= dyn_cast <gomp_critical *> (ctx->stmt))
if (this_stmt_name == gimple_omp_critical_name (other_crit))
{
error_at (gimple_location (stmt),
"%<critical%> region may not be nested inside "
"a %<critical%> region with the same name");
return false;
}
}
break;
case GIMPLE_OMP_TEAMS:
if (ctx == NULL)
break;
else if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
|| (gimple_omp_target_kind (ctx->stmt)
!= GF_OMP_TARGET_KIND_REGION))
{
/* A teams construct can appear either strictly nested inside of a
target construct with no intervening stmts, or it can be encountered
only by the initial task (so it must not appear inside any OpenMP
construct). */
error_at (gimple_location (stmt),
"%<teams%> construct must be closely nested inside of "
"%<target%> construct or not nested in any OpenMP "
"construct");
return false;
}
break;
case GIMPLE_OMP_TARGET:
for (c = gimple_omp_target_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
&& (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE
|| OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK))
{
enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
error_at (OMP_CLAUSE_LOCATION (c),
"%<depend(%s)%> is only allowed in %<omp ordered%>",
kind == OMP_CLAUSE_DEPEND_SOURCE ? "source" : "sink");
return false;
}
if (is_gimple_omp_offloaded (stmt)
&& oacc_get_fn_attrib (cfun->decl) != NULL)
{
error_at (gimple_location (stmt),
"OpenACC region inside of OpenACC routine, nested "
"parallelism not supported yet");
return false;
}
for (; ctx != NULL; ctx = ctx->outer)
{
if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
{
if (is_gimple_omp (stmt)
&& is_gimple_omp_oacc (stmt)
&& is_gimple_omp (ctx->stmt))
{
error_at (gimple_location (stmt),
"OpenACC construct inside of non-OpenACC region");
return false;
}
continue;
}
const char *stmt_name, *ctx_stmt_name;
switch (gimple_omp_target_kind (stmt))
{
case GF_OMP_TARGET_KIND_REGION: stmt_name = "target"; break;
case GF_OMP_TARGET_KIND_DATA: stmt_name = "target data"; break;
case GF_OMP_TARGET_KIND_UPDATE: stmt_name = "target update"; break;
case GF_OMP_TARGET_KIND_ENTER_DATA:
stmt_name = "target enter data"; break;
case GF_OMP_TARGET_KIND_EXIT_DATA:
stmt_name = "target exit data"; break;
case GF_OMP_TARGET_KIND_OACC_PARALLEL: stmt_name = "parallel"; break;
case GF_OMP_TARGET_KIND_OACC_KERNELS: stmt_name = "kernels"; break;
case GF_OMP_TARGET_KIND_OACC_SERIAL: stmt_name = "serial"; break;
case GF_OMP_TARGET_KIND_OACC_DATA: stmt_name = "data"; break;
case GF_OMP_TARGET_KIND_OACC_UPDATE: stmt_name = "update"; break;
case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
stmt_name = "enter/exit data"; break;
case GF_OMP_TARGET_KIND_OACC_DECLARE: stmt_name = "declare"; break;
case GF_OMP_TARGET_KIND_OACC_HOST_DATA: stmt_name = "host_data";
break;
default: gcc_unreachable ();
}
switch (gimple_omp_target_kind (ctx->stmt))
{
case GF_OMP_TARGET_KIND_REGION: ctx_stmt_name = "target"; break;
case GF_OMP_TARGET_KIND_DATA: ctx_stmt_name = "target data"; break;
case GF_OMP_TARGET_KIND_OACC_PARALLEL:
ctx_stmt_name = "parallel"; break;
case GF_OMP_TARGET_KIND_OACC_KERNELS:
ctx_stmt_name = "kernels"; break;
case GF_OMP_TARGET_KIND_OACC_SERIAL:
ctx_stmt_name = "serial"; break;
case GF_OMP_TARGET_KIND_OACC_DATA: ctx_stmt_name = "data"; break;
case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
ctx_stmt_name = "host_data"; break;
default: gcc_unreachable ();
}
/* OpenACC/OpenMP mismatch? */
if (is_gimple_omp_oacc (stmt)
!= is_gimple_omp_oacc (ctx->stmt))
{
error_at (gimple_location (stmt),
"%s %qs construct inside of %s %qs region",
(is_gimple_omp_oacc (stmt)
? "OpenACC" : "OpenMP"), stmt_name,
(is_gimple_omp_oacc (ctx->stmt)
? "OpenACC" : "OpenMP"), ctx_stmt_name);
return false;
}
if (is_gimple_omp_offloaded (ctx->stmt))
{
/* No GIMPLE_OMP_TARGET inside offloaded OpenACC CTX. */
if (is_gimple_omp_oacc (ctx->stmt))
{
error_at (gimple_location (stmt),
"%qs construct inside of %qs region",
stmt_name, ctx_stmt_name);
return false;
}
else
{
warning_at (gimple_location (stmt), 0,
"%qs construct inside of %qs region",
stmt_name, ctx_stmt_name);
}
}
}
break;
default:
break;
}
return true;
}
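/* Two illustrative rejections from the checks above (assumed source):
#pragma omp teams
#pragma omp single      // error: only distribute, parallel or loop
// regions may be strictly nested in teams
and
#pragma omp parallel
#pragma omp distribute  // error: distribute must be strictly nested
// inside teams
*/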
/* Helper function for scan_omp.
Callback for walk_tree or operators in walk_gimple_stmt used to
scan for OMP directives in TP. */
static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
omp_context *ctx = (omp_context *) wi->info;
tree t = *tp;
switch (TREE_CODE (t))
{
case VAR_DECL:
case PARM_DECL:
case LABEL_DECL:
case RESULT_DECL:
if (ctx)
{
tree repl = remap_decl (t, &ctx->cb);
gcc_checking_assert (TREE_CODE (repl) != ERROR_MARK);
*tp = repl;
}
break;
default:
if (ctx && TYPE_P (t))
*tp = remap_type (t, &ctx->cb);
else if (!DECL_P (t))
{
*walk_subtrees = 1;
if (ctx)
{
tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
if (tem != TREE_TYPE (t))
{
if (TREE_CODE (t) == INTEGER_CST)
*tp = wide_int_to_tree (tem, wi::to_wide (t));
else
TREE_TYPE (t) = tem;
}
}
}
break;
}
return NULL_TREE;
}
/* Return true if FNDECL is a setjmp or a longjmp. */
static bool
setjmp_or_longjmp_p (const_tree fndecl)
{
if (fndecl_built_in_p (fndecl, BUILT_IN_SETJMP)
|| fndecl_built_in_p (fndecl, BUILT_IN_LONGJMP))
return true;
tree declname = DECL_NAME (fndecl);
if (!declname
|| (DECL_CONTEXT (fndecl) != NULL_TREE
&& TREE_CODE (DECL_CONTEXT (fndecl)) != TRANSLATION_UNIT_DECL)
|| !TREE_PUBLIC (fndecl))
return false;
const char *name = IDENTIFIER_POINTER (declname);
return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
}
/* Return true if FNDECL is an omp_* runtime API call. */
static bool
omp_runtime_api_call (const_tree fndecl)
{
tree declname = DECL_NAME (fndecl);
if (!declname
|| (DECL_CONTEXT (fndecl) != NULL_TREE
&& TREE_CODE (DECL_CONTEXT (fndecl)) != TRANSLATION_UNIT_DECL)
|| !TREE_PUBLIC (fndecl))
return false;
const char *name = IDENTIFIER_POINTER (declname);
if (strncmp (name, "omp_", 4) != 0)
return false;
static const char *omp_runtime_apis[] =
{
/* This array has 3 sections. First, omp_* calls that don't
have any suffixes. */
"target_alloc",
"target_associate_ptr",
"target_disassociate_ptr",
"target_free",
"target_is_present",
"target_memcpy",
"target_memcpy_rect",
NULL,
/* Now omp_* calls that are available as omp_* and omp_*_. */
"capture_affinity",
"destroy_lock",
"destroy_nest_lock",
"display_affinity",
"get_active_level",
"get_affinity_format",
"get_cancellation",
"get_default_device",
"get_dynamic",
"get_initial_device",
"get_level",
"get_max_active_levels",
"get_max_task_priority",
"get_max_threads",
"get_nested",
"get_num_devices",
"get_num_places",
"get_num_procs",
"get_num_teams",
"get_num_threads",
"get_partition_num_places",
"get_place_num",
"get_proc_bind",
"get_team_num",
"get_thread_limit",
"get_thread_num",
"get_wtick",
"get_wtime",
"in_final",
"in_parallel",
"init_lock",
"init_nest_lock",
"is_initial_device",
"pause_resource",
"pause_resource_all",
"set_affinity_format",
"set_lock",
"set_nest_lock",
"test_lock",
"test_nest_lock",
"unset_lock",
"unset_nest_lock",
NULL,
/* And finally calls available as omp_*, omp_*_ and omp_*_8_. */
"get_ancestor_thread_num",
"get_partition_place_nums",
"get_place_num_procs",
"get_place_proc_ids",
"get_schedule",
"get_team_size",
"set_default_device",
"set_dynamic",
"set_max_active_levels",
"set_nested",
"set_num_threads",
"set_schedule"
};
int mode = 0;
for (unsigned i = 0; i < ARRAY_SIZE (omp_runtime_apis); i++)
{
if (omp_runtime_apis[i] == NULL)
{
mode++;
continue;
}
size_t len = strlen (omp_runtime_apis[i]);
if (strncmp (name + 4, omp_runtime_apis[i], len) == 0
&& (name[4 + len] == '\0'
|| (mode > 0
&& name[4 + len] == '_'
&& (name[4 + len + 1] == '\0'
|| (mode > 1
&& strcmp (name + 4 + len + 1, "8_") == 0)))))
return true;
}
return false;
}
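/* Matching examples for the three sections above (illustrative):
"omp_target_alloc" matches section one exactly, while
"omp_target_alloc_" does not; "omp_get_num_threads" and
"omp_get_num_threads_" both match via section two; and
"omp_get_team_size", "omp_get_team_size_" and "omp_get_team_size_8_"
all match via section three. */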
/* Helper function for scan_omp.
Callback for walk_gimple_stmt used to scan for OMP directives in
the current statement in GSI. */
static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
struct walk_stmt_info *wi)
{
gimple *stmt = gsi_stmt (*gsi);
omp_context *ctx = (omp_context *) wi->info;
if (gimple_has_location (stmt))
input_location = gimple_location (stmt);
/* Check the nesting restrictions. */
bool remove = false;
if (is_gimple_omp (stmt))
remove = !check_omp_nesting_restrictions (stmt, ctx);
else if (is_gimple_call (stmt))
{
tree fndecl = gimple_call_fndecl (stmt);
if (fndecl)
{
if (ctx
&& gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD
&& setjmp_or_longjmp_p (fndecl)
&& !ctx->loop_p)
{
remove = true;
error_at (gimple_location (stmt),
"setjmp/longjmp inside %<simd%> construct");
}
else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
switch (DECL_FUNCTION_CODE (fndecl))
{
case BUILT_IN_GOMP_BARRIER:
case BUILT_IN_GOMP_CANCEL:
case BUILT_IN_GOMP_CANCELLATION_POINT:
case BUILT_IN_GOMP_TASKYIELD:
case BUILT_IN_GOMP_TASKWAIT:
case BUILT_IN_GOMP_TASKGROUP_START:
case BUILT_IN_GOMP_TASKGROUP_END:
remove = !check_omp_nesting_restrictions (stmt, ctx);
break;
default:
break;
}
else if (ctx)
{
omp_context *octx = ctx;
if (gimple_code (ctx->stmt) == GIMPLE_OMP_SCAN && ctx->outer)
octx = ctx->outer;
if (octx->order_concurrent && omp_runtime_api_call (fndecl))
{
remove = true;
error_at (gimple_location (stmt),
"OpenMP runtime API call %qD in a region with "
"%<order(concurrent)%> clause", fndecl);
}
}
}
}
if (remove)
{
stmt = gimple_build_nop ();
gsi_replace (gsi, stmt, false);
}
*handled_ops_p = true;
switch (gimple_code (stmt))
{
case GIMPLE_OMP_PARALLEL:
taskreg_nesting_level++;
scan_omp_parallel (gsi, ctx);
taskreg_nesting_level--;
break;
case GIMPLE_OMP_TASK:
taskreg_nesting_level++;
scan_omp_task (gsi, ctx);
taskreg_nesting_level--;
break;
case GIMPLE_OMP_FOR:
if ((gimple_omp_for_kind (as_a <gomp_for *> (stmt))
== GF_OMP_FOR_KIND_SIMD)
&& gimple_omp_for_combined_into_p (stmt)
&& gimple_code (ctx->stmt) != GIMPLE_OMP_SCAN)
{
tree clauses = gimple_omp_for_clauses (as_a <gomp_for *> (stmt));
tree c = omp_find_clause (clauses, OMP_CLAUSE_REDUCTION);
if (c && OMP_CLAUSE_REDUCTION_INSCAN (c) && !seen_error ())
{
scan_omp_simd_scan (gsi, as_a <gomp_for *> (stmt), ctx);
break;
}
}
if ((gimple_omp_for_kind (as_a <gomp_for *> (stmt))
== GF_OMP_FOR_KIND_SIMD)
&& omp_maybe_offloaded_ctx (ctx)
&& omp_max_simt_vf ())
scan_omp_simd (gsi, as_a <gomp_for *> (stmt), ctx);
else
scan_omp_for (as_a <gomp_for *> (stmt), ctx);
break;
case GIMPLE_OMP_SECTIONS:
scan_omp_sections (as_a <gomp_sections *> (stmt), ctx);
break;
case GIMPLE_OMP_SINGLE:
scan_omp_single (as_a <gomp_single *> (stmt), ctx);
break;
case GIMPLE_OMP_SCAN:
if (tree clauses = gimple_omp_scan_clauses (as_a <gomp_scan *> (stmt)))
{
if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_INCLUSIVE)
ctx->scan_inclusive = true;
else if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_EXCLUSIVE)
ctx->scan_exclusive = true;
}
/* FALLTHRU */
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_GRID_BODY:
ctx = new_omp_context (stmt, ctx);
scan_omp (gimple_omp_body_ptr (stmt), ctx);
break;
case GIMPLE_OMP_TASKGROUP:
ctx = new_omp_context (stmt, ctx);
scan_sharing_clauses (gimple_omp_taskgroup_clauses (stmt), ctx);
scan_omp (gimple_omp_body_ptr (stmt), ctx);
break;
case GIMPLE_OMP_TARGET:
if (is_gimple_omp_offloaded (stmt))
{
taskreg_nesting_level++;
scan_omp_target (as_a <gomp_target *> (stmt), ctx);
taskreg_nesting_level--;
}
else
scan_omp_target (as_a <gomp_target *> (stmt), ctx);
break;
case GIMPLE_OMP_TEAMS:
if (gimple_omp_teams_host (as_a <gomp_teams *> (stmt)))
{
taskreg_nesting_level++;
scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
taskreg_nesting_level--;
}
else
scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
break;
case GIMPLE_BIND:
{
tree var;
*handled_ops_p = false;
if (ctx)
for (var = gimple_bind_vars (as_a <gbind *> (stmt));
var ;
var = DECL_CHAIN (var))
insert_decl_map (&ctx->cb, var, var);
}
break;
default:
*handled_ops_p = false;
break;
}
return NULL_TREE;
}
/* Scan all the statements starting at the current statement. CTX
contains context information about the OMP directives and
clauses found during the scan. */
static void
scan_omp (gimple_seq *body_p, omp_context *ctx)
{
location_t saved_location;
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
wi.info = ctx;
wi.want_locations = true;
saved_location = input_location;
walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
input_location = saved_location;
}
/* Re-gimplification and code generation routines. */
/* Remove omp_member_access_dummy_var variables from gimple_bind_vars
of BIND if in a method. */
static void
maybe_remove_omp_member_access_dummy_vars (gbind *bind)
{
if (DECL_ARGUMENTS (current_function_decl)
&& DECL_ARTIFICIAL (DECL_ARGUMENTS (current_function_decl))
&& (TREE_CODE (TREE_TYPE (DECL_ARGUMENTS (current_function_decl)))
== POINTER_TYPE))
{
tree vars = gimple_bind_vars (bind);
for (tree *pvar = &vars; *pvar; )
if (omp_member_access_dummy_var (*pvar))
*pvar = DECL_CHAIN (*pvar);
else
pvar = &DECL_CHAIN (*pvar);
gimple_bind_set_vars (bind, vars);
}
}
/* Remove omp_member_access_dummy_var variables from BLOCK_VARS of
block and its subblocks. */
static void
remove_member_access_dummy_vars (tree block)
{
for (tree *pvar = &BLOCK_VARS (block); *pvar; )
if (omp_member_access_dummy_var (*pvar))
*pvar = DECL_CHAIN (*pvar);
else
pvar = &DECL_CHAIN (*pvar);
for (block = BLOCK_SUBBLOCKS (block); block; block = BLOCK_CHAIN (block))
remove_member_access_dummy_vars (block);
}
/* If a context was created for STMT when it was scanned, return it. */
static omp_context *
maybe_lookup_ctx (gimple *stmt)
{
splay_tree_node n;
n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
return n ? (omp_context *) n->value : NULL;
}
/* Find the mapping for DECL in CTX or the immediately enclosing
context that has a mapping for DECL.
If CTX is a nested parallel directive, we may have to use the decl
mappings created in CTX's parent context. Suppose that we have the
following parallel nesting (variable UIDs shown for clarity):
iD.1562 = 0;
#omp parallel shared(iD.1562) -> outer parallel
iD.1562 = iD.1562 + 1;
#omp parallel shared (iD.1562) -> inner parallel
iD.1562 = iD.1562 - 1;
Each parallel structure will create a distinct .omp_data_s structure
for copying iD.1562 in/out of the directive:
outer parallel .omp_data_s.1.i -> iD.1562
inner parallel .omp_data_s.2.i -> iD.1562
A shared variable mapping will produce a copy-out operation before
the parallel directive and a copy-in operation after it. So, in
this case we would have:
iD.1562 = 0;
.omp_data_o.1.i = iD.1562;
#omp parallel shared(iD.1562) -> outer parallel
.omp_data_i.1 = &.omp_data_o.1
.omp_data_i.1->i = .omp_data_i.1->i + 1;
.omp_data_o.2.i = iD.1562; -> **
#omp parallel shared(iD.1562) -> inner parallel
.omp_data_i.2 = &.omp_data_o.2
.omp_data_i.2->i = .omp_data_i.2->i - 1;
** This is a problem. The symbol iD.1562 cannot be referenced
inside the body of the outer parallel region. But since we are
emitting this copy operation while expanding the inner parallel
directive, we need to access the CTX structure of the outer
parallel directive to get the correct mapping:
.omp_data_o.2.i = .omp_data_i.1->i
Since there may be other workshare or parallel directives enclosing
the parallel directive, it may be necessary to walk up the context
parent chain. This is not a problem in general because nested
parallelism happens only rarely. */
static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
tree t;
omp_context *up;
for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
t = maybe_lookup_decl (decl, up);
gcc_assert (!ctx->is_nested || t || is_global_var (decl));
return t ? t : decl;
}
/* Similar to lookup_decl_in_outer_ctx, but return DECL if no outer
context has a mapping for it; unlike that function, do not assert
that nested contexts must have produced a mapping. */
static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
tree t = NULL;
omp_context *up;
for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
t = maybe_lookup_decl (decl, up);
return t ? t : decl;
}
/* Construct the initialization value for reduction operation OP. */
tree
omp_reduction_init_op (location_t loc, enum tree_code op, tree type)
{
switch (op)
{
case PLUS_EXPR:
case MINUS_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_ORIF_EXPR:
case TRUTH_XOR_EXPR:
case NE_EXPR:
return build_zero_cst (type);
case MULT_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_ANDIF_EXPR:
case EQ_EXPR:
return fold_convert_loc (loc, type, integer_one_node);
case BIT_AND_EXPR:
return fold_convert_loc (loc, type, integer_minus_one_node);
case MAX_EXPR:
if (SCALAR_FLOAT_TYPE_P (type))
{
REAL_VALUE_TYPE max, min;
if (HONOR_INFINITIES (type))
{
real_inf (&max);
real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
}
else
real_maxval (&min, 1, TYPE_MODE (type));
return build_real (type, min);
}
else if (POINTER_TYPE_P (type))
{
wide_int min
= wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
return wide_int_to_tree (type, min);
}
else
{
gcc_assert (INTEGRAL_TYPE_P (type));
return TYPE_MIN_VALUE (type);
}
case MIN_EXPR:
if (SCALAR_FLOAT_TYPE_P (type))
{
REAL_VALUE_TYPE max;
if (HONOR_INFINITIES (type))
real_inf (&max);
else
real_maxval (&max, 0, TYPE_MODE (type));
return build_real (type, max);
}
else if (POINTER_TYPE_P (type))
{
wide_int max
= wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
return wide_int_to_tree (type, max);
}
else
{
gcc_assert (INTEGRAL_TYPE_P (type));
return TYPE_MAX_VALUE (type);
}
default:
gcc_unreachable ();
}
}
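/* A few concrete instances of the above (illustrative only):
     reduction(+:x) on int        -> 0
     reduction(*:x) on int        -> 1
     reduction(&:x) on int        -> -1 (all bits set)
     reduction(max:x) on double   -> -inf if infinities are honored,
                                     otherwise the most negative finite
                                     value
     reduction(min:x) on unsigned -> TYPE_MAX_VALUE, i.e. UINT_MAX. */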
/* Construct the initialization value for reduction CLAUSE. */
tree
omp_reduction_init (tree clause, tree type)
{
return omp_reduction_init_op (OMP_CLAUSE_LOCATION (clause),
OMP_CLAUSE_REDUCTION_CODE (clause), type);
}
/* Return alignment to be assumed for var in CLAUSE, which should be
OMP_CLAUSE_ALIGNED. */
static tree
omp_clause_aligned_alignment (tree clause)
{
if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
/* Otherwise return implementation-defined alignment. */
unsigned int al = 1;
opt_scalar_mode mode_iter;
auto_vector_modes modes;
targetm.vectorize.autovectorize_vector_modes (&modes, true);
static enum mode_class classes[]
= { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
for (int i = 0; i < 4; i += 2)
/* The for loop above dictates that we only walk through scalar classes. */
FOR_EACH_MODE_IN_CLASS (mode_iter, classes[i])
{
scalar_mode mode = mode_iter.require ();
machine_mode vmode = targetm.vectorize.preferred_simd_mode (mode);
if (GET_MODE_CLASS (vmode) != classes[i + 1])
continue;
machine_mode alt_vmode;
for (unsigned int j = 0; j < modes.length (); ++j)
if (related_vector_mode (modes[j], mode).exists (&alt_vmode)
&& known_ge (GET_MODE_SIZE (alt_vmode), GET_MODE_SIZE (vmode)))
vmode = alt_vmode;
tree type = lang_hooks.types.type_for_mode (mode, 1);
if (type == NULL_TREE || TYPE_MODE (type) != mode)
continue;
type = build_vector_type_for_mode (type, vmode);
if (TYPE_MODE (type) != vmode)
continue;
if (TYPE_ALIGN_UNIT (type) > al)
al = TYPE_ALIGN_UNIT (type);
}
return build_int_cst (integer_type_node, al);
}
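/* For instance, on an x86_64 target where the preferred SIMD mode for
   SFmode is V8SFmode (AVX), the loop above yields al = 32, so a plain
   "aligned (p)" clause would assume 32-byte alignment. The exact value
   is target-dependent; this is only an illustration. */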
/* This structure is part of the interface between lower_rec_simd_input_clauses
and lower_rec_input_clauses. */
class omplow_simd_context {
public:
omplow_simd_context () { memset (this, 0, sizeof (*this)); }
tree idx;
tree lane;
tree lastlane;
vec<tree, va_heap> simt_eargs;
gimple_seq simt_dlist;
poly_uint64_pod max_vf;
bool is_simt;
};
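/* A note on max_vf, inferred from its uses below: 0 means "not yet
   computed", 1 means SIMD privatization is disabled (the loop will
   effectively get safelen(1)), and anything larger is the element
   count used for each "omp simd array" replacement. */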
/* Helper function of lower_rec_input_clauses, used for #pragma omp simd
privatization. */
static bool
lower_rec_simd_input_clauses (tree new_var, omp_context *ctx,
omplow_simd_context *sctx, tree &ivar,
tree &lvar, tree *rvar = NULL,
tree *rvar2 = NULL)
{
if (known_eq (sctx->max_vf, 0U))
{
sctx->max_vf = sctx->is_simt ? omp_max_simt_vf () : omp_max_vf ();
if (maybe_gt (sctx->max_vf, 1U))
{
tree c = omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_SAFELEN);
if (c)
{
poly_uint64 safe_len;
if (!poly_int_tree_p (OMP_CLAUSE_SAFELEN_EXPR (c), &safe_len)
|| maybe_lt (safe_len, 1U))
sctx->max_vf = 1;
else
sctx->max_vf = lower_bound (sctx->max_vf, safe_len);
}
}
if (maybe_gt (sctx->max_vf, 1U))
{
sctx->idx = create_tmp_var (unsigned_type_node);
sctx->lane = create_tmp_var (unsigned_type_node);
}
}
if (known_eq (sctx->max_vf, 1U))
return false;
if (sctx->is_simt)
{
if (is_gimple_reg (new_var))
{
ivar = lvar = new_var;
return true;
}
tree type = TREE_TYPE (new_var), ptype = build_pointer_type (type);
ivar = lvar = create_tmp_var (type);
TREE_ADDRESSABLE (ivar) = 1;
DECL_ATTRIBUTES (ivar) = tree_cons (get_identifier ("omp simt private"),
NULL, DECL_ATTRIBUTES (ivar));
sctx->simt_eargs.safe_push (build1 (ADDR_EXPR, ptype, ivar));
tree clobber = build_clobber (type);
gimple *g = gimple_build_assign (ivar, clobber);
gimple_seq_add_stmt (&sctx->simt_dlist, g);
}
else
{
tree atype = build_array_type_nelts (TREE_TYPE (new_var), sctx->max_vf);
tree avar = create_tmp_var_raw (atype);
if (TREE_ADDRESSABLE (new_var))
TREE_ADDRESSABLE (avar) = 1;
DECL_ATTRIBUTES (avar)
= tree_cons (get_identifier ("omp simd array"), NULL,
DECL_ATTRIBUTES (avar));
gimple_add_tmp_var (avar);
tree iavar = avar;
if (rvar && !ctx->for_simd_scan_phase)
{
/* For inscan reductions, create another array temporary,
which will hold the reduced value. */
iavar = create_tmp_var_raw (atype);
if (TREE_ADDRESSABLE (new_var))
TREE_ADDRESSABLE (iavar) = 1;
DECL_ATTRIBUTES (iavar)
= tree_cons (get_identifier ("omp simd array"), NULL,
tree_cons (get_identifier ("omp simd inscan"), NULL,
DECL_ATTRIBUTES (iavar)));
gimple_add_tmp_var (iavar);
ctx->cb.decl_map->put (avar, iavar);
if (sctx->lastlane == NULL_TREE)
sctx->lastlane = create_tmp_var (unsigned_type_node);
*rvar = build4 (ARRAY_REF, TREE_TYPE (new_var), iavar,
sctx->lastlane, NULL_TREE, NULL_TREE);
TREE_THIS_NOTRAP (*rvar) = 1;
if (ctx->scan_exclusive)
{
/* And for exclusive scan yet another one, which will
hold the value during the scan phase. */
tree savar = create_tmp_var_raw (atype);
if (TREE_ADDRESSABLE (new_var))
TREE_ADDRESSABLE (savar) = 1;
DECL_ATTRIBUTES (savar)
= tree_cons (get_identifier ("omp simd array"), NULL,
tree_cons (get_identifier ("omp simd inscan "
"exclusive"), NULL,
DECL_ATTRIBUTES (savar)));
gimple_add_tmp_var (savar);
ctx->cb.decl_map->put (iavar, savar);
*rvar2 = build4 (ARRAY_REF, TREE_TYPE (new_var), savar,
sctx->idx, NULL_TREE, NULL_TREE);
TREE_THIS_NOTRAP (*rvar2) = 1;
}
}
ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), iavar, sctx->idx,
NULL_TREE, NULL_TREE);
lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, sctx->lane,
NULL_TREE, NULL_TREE);
TREE_THIS_NOTRAP (ivar) = 1;
TREE_THIS_NOTRAP (lvar) = 1;
}
if (DECL_P (new_var))
{
SET_DECL_VALUE_EXPR (new_var, lvar);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
return true;
}
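/* Sketch of the non-SIMT transformation above: for max_vf == N, a
   privatized scalar

       float x;

   becomes

       float D.x[N];      <-- marked with the "omp simd array" attribute

   with accesses inside the loop rewritten through IVAR (D.x[idx]) and
   the value that survives the loop taken through LVAR (D.x[lane]). */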
/* Helper function of lower_rec_input_clauses. For a reference used in
a simd reduction, allocate the underlying variable it will reference. */
static void
handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
{
tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
if (TREE_CONSTANT (z))
{
z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)),
get_name (new_vard));
gimple_add_tmp_var (z);
TREE_ADDRESSABLE (z) = 1;
z = build_fold_addr_expr_loc (loc, z);
gimplify_assign (new_vard, z, ilist);
}
}
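/* I.e., for a constant-size reference-typed private, the above emits
   roughly "T tmp; new_vard = &tmp;", giving the reference private
   backing storage (a paraphrase of the code, not separate machinery). */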
/* Helper function for lower_rec_input_clauses. Emit code into the ILIST
sequence that computes (type) (tskred_temp[idx]). */
static tree
task_reduction_read (gimple_seq *ilist, tree tskred_temp, tree type,
unsigned idx)
{
unsigned HOST_WIDE_INT sz
= tree_to_uhwi (TYPE_SIZE_UNIT (pointer_sized_int_node));
tree r = build2 (MEM_REF, pointer_sized_int_node,
tskred_temp, build_int_cst (TREE_TYPE (tskred_temp),
idx * sz));
tree v = create_tmp_var (pointer_sized_int_node);
gimple *g = gimple_build_assign (v, r);
gimple_seq_add_stmt (ilist, g);
if (!useless_type_conversion_p (type, pointer_sized_int_node))
{
v = create_tmp_var (type);
g = gimple_build_assign (v, NOP_EXPR, gimple_assign_lhs (g));
gimple_seq_add_stmt (ilist, g);
}
return v;
}
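/* In C terms, the sequence built above is approximately:

       __UINTPTR_TYPE__ v = ((__UINTPTR_TYPE__ *) tskred_temp)[idx];
       return (type) v;

   i.e. a pointer-sized load at byte offset idx * sizeof (void *),
   followed by a conversion when TYPE is not pointer-sized. */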
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
from the receiver (aka child) side and initializers for REFERENCE_TYPE
private variables. Initialization statements go in ILIST, while calls
to destructors go in DLIST. */
static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
omp_context *ctx, struct omp_for_data *fd)
{
tree c, copyin_seq, x, ptr;
bool copyin_by_ref = false;
bool lastprivate_firstprivate = false;
bool reduction_omp_orig_ref = false;
int pass;
bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD);
omplow_simd_context sctx = omplow_simd_context ();
tree simt_lane = NULL_TREE, simtrec = NULL_TREE;
tree ivar = NULL_TREE, lvar = NULL_TREE, uid = NULL_TREE;
gimple_seq llist[4] = { };
tree nonconst_simd_if = NULL_TREE;
copyin_seq = NULL;
sctx.is_simt = is_simd && omp_find_clause (clauses, OMP_CLAUSE__SIMT_);
/* Set max_vf=1 (which will later enforce safelen=1) in simd loops
with data sharing clauses referencing variable sized vars. That
is unnecessarily hard to support and very unlikely to result in
vectorized code anyway. */
if (is_simd)
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_LINEAR:
if (OMP_CLAUSE_LINEAR_ARRAY (c))
sctx.max_vf = 1;
/* FALLTHRU */
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_LASTPRIVATE:
if (is_variable_sized (OMP_CLAUSE_DECL (c)))
sctx.max_vf = 1;
else if (omp_is_reference (OMP_CLAUSE_DECL (c)))
{
tree rtype = TREE_TYPE (TREE_TYPE (OMP_CLAUSE_DECL (c)));
if (!TREE_CONSTANT (TYPE_SIZE_UNIT (rtype)))
sctx.max_vf = 1;
}
break;
case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_IN_REDUCTION:
if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF
|| is_variable_sized (OMP_CLAUSE_DECL (c)))
sctx.max_vf = 1;
else if (omp_is_reference (OMP_CLAUSE_DECL (c)))
{
tree rtype = TREE_TYPE (TREE_TYPE (OMP_CLAUSE_DECL (c)));
if (!TREE_CONSTANT (TYPE_SIZE_UNIT (rtype)))
sctx.max_vf = 1;
}
break;
case OMP_CLAUSE_IF:
if (integer_zerop (OMP_CLAUSE_IF_EXPR (c)))
sctx.max_vf = 1;
else if (TREE_CODE (OMP_CLAUSE_IF_EXPR (c)) != INTEGER_CST)
nonconst_simd_if = OMP_CLAUSE_IF_EXPR (c);
break;
case OMP_CLAUSE_SIMDLEN:
if (integer_onep (OMP_CLAUSE_SIMDLEN_EXPR (c)))
sctx.max_vf = 1;
break;
case OMP_CLAUSE__CONDTEMP_:
/* FIXME: lastprivate(conditional:) not handled for SIMT yet. */
if (sctx.is_simt)
sctx.max_vf = 1;
break;
default:
continue;
}
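/* E.g. "#pragma omp simd private (vla)" with a variable length array
   lands in the OMP_CLAUSE_PRIVATE case above and forces max_vf = 1,
   i.e. no SIMD privatization at all (an illustrative reading of the
   switch, not an additional rule). */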
/* Add a placeholder for simduid. */
if (sctx.is_simt && maybe_ne (sctx.max_vf, 1U))
sctx.simt_eargs.safe_push (NULL_TREE);
unsigned task_reduction_cnt = 0;
unsigned task_reduction_cntorig = 0;
unsigned task_reduction_cnt_full = 0;
unsigned task_reduction_cntorig_full = 0;
unsigned task_reduction_other_cnt = 0;
tree tskred_atype = NULL_TREE, tskred_avar = NULL_TREE;
tree tskred_base = NULL_TREE, tskred_temp = NULL_TREE;
/* Do all the fixed-sized types in the first pass, and the variable-sized
types in the second pass, so that the scalar arguments of the
variable-sized types are processed before we use them in the
variable-sized operations. For task reductions we use 4 passes: in the
first two we ignore them, in the third we gather arguments for the
GOMP_task_reduction_remap call, and in the last pass we actually handle
the task reductions. */
for (pass = 0; pass < ((task_reduction_cnt || task_reduction_other_cnt)
? 4 : 2); ++pass)
{
if (pass == 2 && task_reduction_cnt)
{
tskred_atype
= build_array_type_nelts (ptr_type_node, task_reduction_cnt
+ task_reduction_cntorig);
tskred_avar = create_tmp_var_raw (tskred_atype);
gimple_add_tmp_var (tskred_avar);
TREE_ADDRESSABLE (tskred_avar) = 1;
task_reduction_cnt_full = task_reduction_cnt;
task_reduction_cntorig_full = task_reduction_cntorig;
}
else if (pass == 3 && task_reduction_cnt)
{
x = builtin_decl_explicit (BUILT_IN_GOMP_TASK_REDUCTION_REMAP);
gimple *g
= gimple_build_call (x, 3, size_int (task_reduction_cnt),
size_int (task_reduction_cntorig),
build_fold_addr_expr (tskred_avar));
gimple_seq_add_stmt (ilist, g);
}
if (pass == 3 && task_reduction_other_cnt)
{
/* For reduction clauses, build
tskred_base = (void *) tskred_temp[2]
+ omp_get_thread_num () * tskred_temp[1]
or, if tskred_temp[1] is known to be constant, use that constant
directly. This is the start of the private reduction copy block
for the current thread. */
tree v = create_tmp_var (integer_type_node);
x = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
gimple *g = gimple_build_call (x, 0);
gimple_call_set_lhs (g, v);
gimple_seq_add_stmt (ilist, g);
c = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_);
tskred_temp = OMP_CLAUSE_DECL (c);
if (is_taskreg_ctx (ctx))
tskred_temp = lookup_decl (tskred_temp, ctx);
tree v2 = create_tmp_var (sizetype);
g = gimple_build_assign (v2, NOP_EXPR, v);
gimple_seq_add_stmt (ilist, g);
if (ctx->task_reductions[0])
v = fold_convert (sizetype, ctx->task_reductions[0]);
else
v = task_reduction_read (ilist, tskred_temp, sizetype, 1);
tree v3 = create_tmp_var (sizetype);
g = gimple_build_assign (v3, MULT_EXPR, v2, v);
gimple_seq_add_stmt (ilist, g);
v = task_reduction_read (ilist, tskred_temp, ptr_type_node, 2);
tskred_base = create_tmp_var (ptr_type_node);
g = gimple_build_assign (tskred_base, POINTER_PLUS_EXPR, v, v3);
gimple_seq_add_stmt (ilist, g);
}
task_reduction_cnt = 0;
task_reduction_cntorig = 0;
task_reduction_other_cnt = 0;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
{
enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
tree var, new_var;
bool by_ref;
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
bool task_reduction_p = false;
bool task_reduction_needs_orig_p = false;
tree cond = NULL_TREE;
switch (c_kind)
{
case OMP_CLAUSE_PRIVATE:
if (OMP_CLAUSE_PRIVATE_DEBUG (c))
continue;
break;
case OMP_CLAUSE_SHARED:
/* Ignore shared directives in teams construct inside
of target construct. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS
&& !is_host_teams_ctx (ctx))
continue;
if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
{
gcc_assert (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c)
|| is_global_var (OMP_CLAUSE_DECL (c)));
continue;
}
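/* FALLTHRU */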
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_COPYIN:
break;
case OMP_CLAUSE_LINEAR:
if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c)
&& !OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
lastprivate_firstprivate = true;
break;
case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_IN_REDUCTION:
if (is_task_ctx (ctx) || OMP_CLAUSE_REDUCTION_TASK (c))
{
task_reduction_p = true;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
{
task_reduction_other_cnt++;
if (pass == 2)
continue;
}
else
task_reduction_cnt++;
if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
{
var = OMP_CLAUSE_DECL (c);
/* If var is a global variable that isn't privatized
in outer contexts, we don't need to look up the
original address, it is always the address of the
global variable itself. */
if (!DECL_P (var)
|| omp_is_reference (var)
|| !is_global_var
(maybe_lookup_decl_in_outer_ctx (var, ctx)))
{
task_reduction_needs_orig_p = true;
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
task_reduction_cntorig++;
}
}
}
else if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
reduction_omp_orig_ref = true;
break;
case OMP_CLAUSE__REDUCTEMP_:
if (!is_taskreg_ctx (ctx))
continue;
/* FALLTHRU */
case OMP_CLAUSE__LOOPTEMP_:
/* Handle _looptemp_/_reductemp_ clauses only on
parallel/task. */
if (fd)
continue;
break;
case OMP_CLAUSE_LASTPRIVATE:
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
{
lastprivate_firstprivate = true;
if (pass != 0 || is_taskloop_ctx (ctx))
continue;
}
/* Even without a corresponding firstprivate, if the
decl is a Fortran allocatable, it needs an outer var
reference. */
else if (pass == 0
&& lang_hooks.decls.omp_private_outer_ref
(OMP_CLAUSE_DECL (c)))
lastprivate_firstprivate = true;
break;
case OMP_CLAUSE_ALIGNED:
if (pass != 1)
continue;
var = OMP_CLAUSE_DECL (c);
if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
&& !is_global_var (var))
{
new_var = maybe_lookup_decl (var, ctx);
if (new_var == NULL_TREE)
new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
tree alarg = omp_clause_aligned_alignment (c);
alarg = fold_convert_loc (clause_loc, size_type_node, alarg);
x = build_call_expr_loc (clause_loc, x, 2, new_var, alarg);
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
gimplify_and_add (x, ilist);
}
else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
&& is_global_var (var))
{
tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
new_var = lookup_decl (var, ctx);
t = maybe_lookup_decl_in_outer_ctx (var, ctx);
t = build_fold_addr_expr_loc (clause_loc, t);
t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
tree alarg = omp_clause_aligned_alignment (c);
alarg = fold_convert_loc (clause_loc, size_type_node, alarg);
t = build_call_expr_loc (clause_loc, t2, 2, t, alarg);
t = fold_convert_loc (clause_loc, ptype, t);
x = create_tmp_var (ptype);
t = build2 (MODIFY_EXPR, ptype, x, t);
gimplify_and_add (t, ilist);
t = build_simple_mem_ref_loc (clause_loc, x);
SET_DECL_VALUE_EXPR (new_var, t);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
continue;
case OMP_CLAUSE__CONDTEMP_:
if (is_parallel_ctx (ctx)
|| (is_simd && !OMP_CLAUSE__CONDTEMP__ITER (c)))
break;
continue;
default:
continue;
}
if (task_reduction_p != (pass >= 2))
continue;
new_var = var = OMP_CLAUSE_DECL (c);
if ((c_kind == OMP_CLAUSE_REDUCTION
|| c_kind == OMP_CLAUSE_IN_REDUCTION)
&& TREE_CODE (var) == MEM_REF)
{
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == POINTER_PLUS_EXPR)
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == INDIRECT_REF
|| TREE_CODE (var) == ADDR_EXPR)
var = TREE_OPERAND (var, 0);
if (is_variable_sized (var))
{
gcc_assert (DECL_HAS_VALUE_EXPR_P (var));
var = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (var) == INDIRECT_REF);
var = TREE_OPERAND (var, 0);
gcc_assert (DECL_P (var));
}
new_var = var;
}
if (c_kind != OMP_CLAUSE_COPYIN)
new_var = lookup_decl (var, ctx);
if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
{
if (pass != 0)
continue;
}
/* C/C++ array section reductions. */
else if ((c_kind == OMP_CLAUSE_REDUCTION
|| c_kind == OMP_CLAUSE_IN_REDUCTION)
&& var != OMP_CLAUSE_DECL (c))
{
if (pass == 0)
continue;
tree bias = TREE_OPERAND (OMP_CLAUSE_DECL (c), 1);
tree orig_var = TREE_OPERAND (OMP_CLAUSE_DECL (c), 0);
if (TREE_CODE (orig_var) == POINTER_PLUS_EXPR)
{
tree b = TREE_OPERAND (orig_var, 1);
b = maybe_lookup_decl (b, ctx);
if (b == NULL)
{
b = TREE_OPERAND (orig_var, 1);
b = maybe_lookup_decl_in_outer_ctx (b, ctx);
}
if (integer_zerop (bias))
bias = b;
else
{
bias = fold_convert_loc (clause_loc,
TREE_TYPE (b), bias);
bias = fold_build2_loc (clause_loc, PLUS_EXPR,
TREE_TYPE (b), b, bias);
}
orig_var = TREE_OPERAND (orig_var, 0);
}
if (pass == 2)
{
tree out = maybe_lookup_decl_in_outer_ctx (var, ctx);
if (is_global_var (out)
&& TREE_CODE (TREE_TYPE (out)) != POINTER_TYPE
&& (TREE_CODE (TREE_TYPE (out)) != REFERENCE_TYPE
|| (TREE_CODE (TREE_TYPE (TREE_TYPE (out)))
!= POINTER_TYPE)))
x = var;
else
{
bool by_ref = use_pointer_for_field (var, NULL);
x = build_receiver_ref (var, by_ref, ctx);
if (TREE_CODE (TREE_TYPE (var)) == REFERENCE_TYPE
&& (TREE_CODE (TREE_TYPE (TREE_TYPE (var)))
== POINTER_TYPE))
x = build_fold_addr_expr (x);
}
if (TREE_CODE (orig_var) == INDIRECT_REF)
x = build_simple_mem_ref (x);
else if (TREE_CODE (orig_var) == ADDR_EXPR)
{
if (var == TREE_OPERAND (orig_var, 0))
x = build_fold_addr_expr (x);
}
bias = fold_convert (sizetype, bias);
x = fold_convert (ptr_type_node, x);
x = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
TREE_TYPE (x), x, bias);
unsigned cnt = task_reduction_cnt - 1;
if (!task_reduction_needs_orig_p)
cnt += (task_reduction_cntorig_full
- task_reduction_cntorig);
else
cnt = task_reduction_cntorig - 1;
tree r = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
size_int (cnt), NULL_TREE, NULL_TREE);
gimplify_assign (r, x, ilist);
continue;
}
if (TREE_CODE (orig_var) == INDIRECT_REF
|| TREE_CODE (orig_var) == ADDR_EXPR)
orig_var = TREE_OPERAND (orig_var, 0);
tree d = OMP_CLAUSE_DECL (c);
tree type = TREE_TYPE (d);
gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
const char *name = get_name (orig_var);
if (pass == 3)
{
tree xv = create_tmp_var (ptr_type_node);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
{
unsigned cnt = task_reduction_cnt - 1;
if (!task_reduction_needs_orig_p)
cnt += (task_reduction_cntorig_full
- task_reduction_cntorig);
else
cnt = task_reduction_cntorig - 1;
x = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
size_int (cnt), NULL_TREE, NULL_TREE);
gimple *g = gimple_build_assign (xv, x);
gimple_seq_add_stmt (ilist, g);
}
else
{
unsigned int idx = *ctx->task_reduction_map->get (c);
tree off;
if (ctx->task_reductions[1 + idx])
off = fold_convert (sizetype,
ctx->task_reductions[1 + idx]);
else
off = task_reduction_read (ilist, tskred_temp, sizetype,
7 + 3 * idx + 1);
gimple *g = gimple_build_assign (xv, POINTER_PLUS_EXPR,
tskred_base, off);
gimple_seq_add_stmt (ilist, g);
}
x = fold_convert (build_pointer_type (boolean_type_node),
xv);
if (TREE_CONSTANT (v))
x = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (x), x,
TYPE_SIZE_UNIT (type));
else
{
tree t = maybe_lookup_decl (v, ctx);
if (t)
v = t;
else
v = maybe_lookup_decl_in_outer_ctx (v, ctx);
gimplify_expr (&v, ilist, NULL, is_gimple_val,
fb_rvalue);
t = fold_build2_loc (clause_loc, PLUS_EXPR,
TREE_TYPE (v), v,
build_int_cst (TREE_TYPE (v), 1));
t = fold_build2_loc (clause_loc, MULT_EXPR,
TREE_TYPE (v), t,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
x = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (x), x, t);
}
cond = create_tmp_var (TREE_TYPE (x));
gimplify_assign (cond, x, ilist);
x = xv;
}
else if (TREE_CONSTANT (v))
{
x = create_tmp_var_raw (type, name);
gimple_add_tmp_var (x);
TREE_ADDRESSABLE (x) = 1;
x = build_fold_addr_expr_loc (clause_loc, x);
}
else
{
tree atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
tree t = maybe_lookup_decl (v, ctx);
if (t)
v = t;
else
v = maybe_lookup_decl_in_outer_ctx (v, ctx);
gimplify_expr (&v, ilist, NULL, is_gimple_val, fb_rvalue);
t = fold_build2_loc (clause_loc, PLUS_EXPR,
TREE_TYPE (v), v,
build_int_cst (TREE_TYPE (v), 1));
t = fold_build2_loc (clause_loc, MULT_EXPR,
TREE_TYPE (v), t,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
tree al = size_int (TYPE_ALIGN (TREE_TYPE (type)));
x = build_call_expr_loc (clause_loc, atmp, 2, t, al);
}
tree ptype = build_pointer_type (TREE_TYPE (type));
x = fold_convert_loc (clause_loc, ptype, x);
tree y = create_tmp_var (ptype, name);
gimplify_assign (y, x, ilist);
x = y;
tree yb = y;
if (!integer_zerop (bias))
{
bias = fold_convert_loc (clause_loc, pointer_sized_int_node,
bias);
yb = fold_convert_loc (clause_loc, pointer_sized_int_node,
x);
yb = fold_build2_loc (clause_loc, MINUS_EXPR,
pointer_sized_int_node, yb, bias);
x = fold_convert_loc (clause_loc, TREE_TYPE (x), yb);
yb = create_tmp_var (ptype, name);
gimplify_assign (yb, x, ilist);
x = yb;
}
d = TREE_OPERAND (d, 0);
if (TREE_CODE (d) == POINTER_PLUS_EXPR)
d = TREE_OPERAND (d, 0);
if (TREE_CODE (d) == ADDR_EXPR)
{
if (orig_var != var)
{
gcc_assert (is_variable_sized (orig_var));
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var),
x);
gimplify_assign (new_var, x, ilist);
tree new_orig_var = lookup_decl (orig_var, ctx);
tree t = build_fold_indirect_ref (new_var);
DECL_IGNORED_P (new_var) = 0;
TREE_THIS_NOTRAP (t) = 1;
SET_DECL_VALUE_EXPR (new_orig_var, t);
DECL_HAS_VALUE_EXPR_P (new_orig_var) = 1;
}
else
{
x = build2 (MEM_REF, TREE_TYPE (new_var), x,
build_int_cst (ptype, 0));
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
}
else
{
gcc_assert (orig_var == var);
if (TREE_CODE (d) == INDIRECT_REF)
{
x = create_tmp_var (ptype, name);
TREE_ADDRESSABLE (x) = 1;
gimplify_assign (x, yb, ilist);
x = build_fold_addr_expr_loc (clause_loc, x);
}
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_assign (new_var, x, ilist);
}
/* GOMP_taskgroup_reduction_register memsets the whole
array to zero. If the initializer is zero, we don't
need to initialize it again, just mark it as ever
used unconditionally, i.e. cond = true. */
if (cond
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) == NULL_TREE
&& initializer_zerop (omp_reduction_init (c,
TREE_TYPE (type))))
{
gimple *g = gimple_build_assign (build_simple_mem_ref (cond),
boolean_true_node);
gimple_seq_add_stmt (ilist, g);
continue;
}
tree end = create_artificial_label (UNKNOWN_LOCATION);
if (cond)
{
gimple *g;
if (!is_parallel_ctx (ctx))
{
tree condv = create_tmp_var (boolean_type_node);
g = gimple_build_assign (condv,
build_simple_mem_ref (cond));
gimple_seq_add_stmt (ilist, g);
tree lab1 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (NE_EXPR, condv,
boolean_false_node, end, lab1);
gimple_seq_add_stmt (ilist, g);
gimple_seq_add_stmt (ilist, gimple_build_label (lab1));
}
g = gimple_build_assign (build_simple_mem_ref (cond),
boolean_true_node);
gimple_seq_add_stmt (ilist, g);
}
tree y1 = create_tmp_var (ptype);
gimplify_assign (y1, y, ilist);
tree i2 = NULL_TREE, y2 = NULL_TREE;
tree body2 = NULL_TREE, end2 = NULL_TREE;
tree y3 = NULL_TREE, y4 = NULL_TREE;
if (task_reduction_needs_orig_p)
{
y3 = create_tmp_var (ptype);
tree ref;
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
ref = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
size_int (task_reduction_cnt_full
+ task_reduction_cntorig - 1),
NULL_TREE, NULL_TREE);
else
{
unsigned int idx = *ctx->task_reduction_map->get (c);
ref = task_reduction_read (ilist, tskred_temp, ptype,
7 + 3 * idx);
}
gimplify_assign (y3, ref, ilist);
}
else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) || is_simd)
{
if (pass != 3)
{
y2 = create_tmp_var (ptype);
gimplify_assign (y2, y, ilist);
}
if (is_simd || OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
{
tree ref = build_outer_var_ref (var, ctx);
/* For references, build_outer_var_ref already performs this. */
if (TREE_CODE (d) == INDIRECT_REF)
gcc_assert (omp_is_reference (var));
else if (TREE_CODE (d) == ADDR_EXPR)
ref = build_fold_addr_expr (ref);
else if (omp_is_reference (var))
ref = build_fold_addr_expr (ref);
ref = fold_convert_loc (clause_loc, ptype, ref);
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
&& OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
{
y3 = create_tmp_var (ptype);
gimplify_assign (y3, unshare_expr (ref), ilist);
}
if (is_simd)
{
y4 = create_tmp_var (ptype);
gimplify_assign (y4, ref, dlist);
}
}
}
tree i = create_tmp_var (TREE_TYPE (v));
gimplify_assign (i, build_int_cst (TREE_TYPE (v), 0), ilist);
tree body = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (ilist, gimple_build_label (body));
if (y2)
{
i2 = create_tmp_var (TREE_TYPE (v));
gimplify_assign (i2, build_int_cst (TREE_TYPE (v), 0), dlist);
body2 = create_artificial_label (UNKNOWN_LOCATION);
end2 = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (dlist, gimple_build_label (body2));
}
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
tree decl_placeholder
= OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c);
SET_DECL_VALUE_EXPR (decl_placeholder,
build_simple_mem_ref (y1));
DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 1;
SET_DECL_VALUE_EXPR (placeholder,
y3 ? build_simple_mem_ref (y3)
: error_mark_node);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
x = lang_hooks.decls.omp_clause_default_ctor
(c, build_simple_mem_ref (y1),
y3 ? build_simple_mem_ref (y3) : NULL_TREE);
if (x)
gimplify_and_add (x, ilist);
if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (ilist, tseq);
}
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
if (is_simd)
{
SET_DECL_VALUE_EXPR (decl_placeholder,
build_simple_mem_ref (y2));
SET_DECL_VALUE_EXPR (placeholder,
build_simple_mem_ref (y4));
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (dlist, tseq);
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
}
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 0;
if (y2)
{
x = lang_hooks.decls.omp_clause_dtor
(c, build_simple_mem_ref (y2));
if (x)
gimplify_and_add (x, dlist);
}
}
else
{
x = omp_reduction_init (c, TREE_TYPE (type));
enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
/* reduction(-:var) sums up the partial results, so it
acts identically to reduction(+:var). */
if (code == MINUS_EXPR)
code = PLUS_EXPR;
gimplify_assign (build_simple_mem_ref (y1), x, ilist);
if (is_simd)
{
x = build2 (code, TREE_TYPE (type),
build_simple_mem_ref (y4),
build_simple_mem_ref (y2));
gimplify_assign (build_simple_mem_ref (y4), x, dlist);
}
}
gimple *g
= gimple_build_assign (y1, POINTER_PLUS_EXPR, y1,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (ilist, g);
if (y3)
{
g = gimple_build_assign (y3, POINTER_PLUS_EXPR, y3,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (ilist, g);
}
g = gimple_build_assign (i, PLUS_EXPR, i,
build_int_cst (TREE_TYPE (i), 1));
gimple_seq_add_stmt (ilist, g);
g = gimple_build_cond (LE_EXPR, i, v, body, end);
gimple_seq_add_stmt (ilist, g);
gimple_seq_add_stmt (ilist, gimple_build_label (end));
if (y2)
{
g = gimple_build_assign (y2, POINTER_PLUS_EXPR, y2,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (dlist, g);
if (y4)
{
g = gimple_build_assign
(y4, POINTER_PLUS_EXPR, y4,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (dlist, g);
}
g = gimple_build_assign (i2, PLUS_EXPR, i2,
build_int_cst (TREE_TYPE (i2), 1));
gimple_seq_add_stmt (dlist, g);
g = gimple_build_cond (LE_EXPR, i2, v, body2, end2);
gimple_seq_add_stmt (dlist, g);
gimple_seq_add_stmt (dlist, gimple_build_label (end2));
}
continue;
}
else if (pass == 2)
{
if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
x = var;
else
{
bool by_ref = use_pointer_for_field (var, ctx);
x = build_receiver_ref (var, by_ref, ctx);
}
if (!omp_is_reference (var))
x = build_fold_addr_expr (x);
x = fold_convert (ptr_type_node, x);
unsigned cnt = task_reduction_cnt - 1;
if (!task_reduction_needs_orig_p)
cnt += task_reduction_cntorig_full - task_reduction_cntorig;
else
cnt = task_reduction_cntorig - 1;
tree r = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
size_int (cnt), NULL_TREE, NULL_TREE);
gimplify_assign (r, x, ilist);
continue;
}
else if (pass == 3)
{
tree type = TREE_TYPE (new_var);
if (!omp_is_reference (var))
type = build_pointer_type (type);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
{
unsigned cnt = task_reduction_cnt - 1;
if (!task_reduction_needs_orig_p)
cnt += (task_reduction_cntorig_full
- task_reduction_cntorig);
else
cnt = task_reduction_cntorig - 1;
x = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
size_int (cnt), NULL_TREE, NULL_TREE);
}
else
{
unsigned int idx = *ctx->task_reduction_map->get (c);
tree off;
if (ctx->task_reductions[1 + idx])
off = fold_convert (sizetype,
ctx->task_reductions[1 + idx]);
else
off = task_reduction_read (ilist, tskred_temp, sizetype,
7 + 3 * idx + 1);
x = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
tskred_base, off);
}
x = fold_convert (type, x);
tree t;
if (omp_is_reference (var))
{
gimplify_assign (new_var, x, ilist);
t = new_var;
new_var = build_simple_mem_ref (new_var);
}
else
{
t = create_tmp_var (type);
gimplify_assign (t, x, ilist);
SET_DECL_VALUE_EXPR (new_var, build_simple_mem_ref (t));
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
t = fold_convert (build_pointer_type (boolean_type_node), t);
t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
cond = create_tmp_var (TREE_TYPE (t));
gimplify_assign (cond, t, ilist);
}
else if (is_variable_sized (var))
{
/* For variable sized types, we need to allocate the
actual storage here. Call alloca and store the
result in the pointer decl that we created elsewhere. */
if (pass == 0)
continue;
if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
{
gcall *stmt;
tree tmp, atmp;
ptr = DECL_VALUE_EXPR (new_var);
gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
ptr = TREE_OPERAND (ptr, 0);
gcc_assert (DECL_P (ptr));
x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
/* void *tmp = __builtin_alloca_with_align (x, DECL_ALIGN (var)); */
atmp = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
stmt = gimple_build_call (atmp, 2, x,
size_int (DECL_ALIGN (var)));
cfun->calls_alloca = 1;
tmp = create_tmp_var_raw (ptr_type_node);
gimple_add_tmp_var (tmp);
gimple_call_set_lhs (stmt, tmp);
gimple_seq_add_stmt (ilist, stmt);
x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
gimplify_assign (ptr, x, ilist);
}
}
else if (omp_is_reference (var)
&& (c_kind != OMP_CLAUSE_FIRSTPRIVATE
|| !OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE (c)))
{
/* For references that are being privatized for Fortran,
allocate new backing storage for the new pointer
variable. This lets us avoid rewriting all the
code that expects a pointer into code that expects
a direct variable. */
if (pass == 0)
continue;
x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
{
x = build_receiver_ref (var, false, ctx);
x = build_fold_addr_expr_loc (clause_loc, x);
}
else if (TREE_CONSTANT (x))
{
/* For a reduction in a SIMD loop, defer adding the
initialization of the reference, because if we decide
to use a SIMD array for it, the initialization could
cause an expansion ICE. Ditto for other privatization
clauses. */
if (is_simd)
x = NULL_TREE;
else
{
x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
get_name (var));
gimple_add_tmp_var (x);
TREE_ADDRESSABLE (x) = 1;
x = build_fold_addr_expr_loc (clause_loc, x);
}
}
else
{
tree atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
tree rtype = TREE_TYPE (TREE_TYPE (new_var));
tree al = size_int (TYPE_ALIGN (rtype));
x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
}
if (x)
{
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_assign (new_var, x, ilist);
}
new_var = build_simple_mem_ref_loc (clause_loc, new_var);
}
else if ((c_kind == OMP_CLAUSE_REDUCTION
|| c_kind == OMP_CLAUSE_IN_REDUCTION)
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
if (pass == 0)
continue;
}
else if (pass != 0)
continue;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_SHARED:
/* Ignore shared directives in teams construct inside
target construct. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS
&& !is_host_teams_ctx (ctx))
continue;
/* Shared global vars are just accessed directly. */
if (is_global_var (new_var))
break;
/* For taskloop firstprivate/lastprivate, represented
as firstprivate and shared clause on the task, new_var
is the firstprivate var. */
if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
break;
/* Set up the DECL_VALUE_EXPR for shared variables now. This
needs to be delayed until after fixup_child_record_type so
that we get the correct type during the dereference. */
by_ref = use_pointer_for_field (var, ctx);
x = build_receiver_ref (var, by_ref, ctx);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
/* ??? If VAR is not passed by reference, and the variable
hasn't been initialized yet, then we'll get a warning for
the store into the omp_data_s structure. Ideally, we'd be
able to notice this and not store anything at all, but
we're generating code too early. Suppress the warning. */
if (!by_ref)
TREE_NO_WARNING (var) = 1;
break;
case OMP_CLAUSE__CONDTEMP_:
if (is_parallel_ctx (ctx))
{
x = build_receiver_ref (var, false, ctx);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
else if (is_simd && !OMP_CLAUSE__CONDTEMP__ITER (c))
{
x = build_zero_cst (TREE_TYPE (var));
goto do_private;
}
break;
case OMP_CLAUSE_LASTPRIVATE:
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
break;
/* FALLTHRU */
case OMP_CLAUSE_PRIVATE:
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
x = build_outer_var_ref (var, ctx);
else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
{
if (is_task_ctx (ctx))
x = build_receiver_ref (var, false, ctx);
else
x = build_outer_var_ref (var, ctx, OMP_CLAUSE_PRIVATE);
}
else
x = NULL;
do_private:
tree nx;
bool copy_ctor;
copy_ctor = false;
nx = unshare_expr (new_var);
if (is_simd
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_LOOP_IV (c))
copy_ctor = true;
if (copy_ctor)
nx = lang_hooks.decls.omp_clause_copy_ctor (c, nx, x);
else
nx = lang_hooks.decls.omp_clause_default_ctor (c, nx, x);
if (is_simd)
{
tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
if ((TREE_ADDRESSABLE (new_var) || nx || y
|| (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& (gimple_omp_for_collapse (ctx->stmt) != 1
|| (gimple_omp_for_index (ctx->stmt, 0)
!= new_var)))
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE__CONDTEMP_
|| omp_is_reference (var))
&& lower_rec_simd_input_clauses (new_var, ctx, &sctx,
ivar, lvar))
{
if (omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
tree new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (lvar));
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
}
if (nx)
{
tree iv = unshare_expr (ivar);
if (copy_ctor)
x = lang_hooks.decls.omp_clause_copy_ctor (c, iv,
x);
else
x = lang_hooks.decls.omp_clause_default_ctor (c,
iv,
x);
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE__CONDTEMP_)
{
x = build2 (MODIFY_EXPR, TREE_TYPE (ivar),
unshare_expr (ivar), x);
nx = x;
}
if (nx && x)
gimplify_and_add (x, &llist[0]);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c))
{
tree v = new_var;
if (!DECL_P (v))
{
gcc_assert (TREE_CODE (v) == MEM_REF);
v = TREE_OPERAND (v, 0);
gcc_assert (DECL_P (v));
}
v = *ctx->lastprivate_conditional_map->get (v);
tree t = create_tmp_var (TREE_TYPE (v));
tree z = build_zero_cst (TREE_TYPE (v));
tree orig_v
= build_outer_var_ref (var, ctx,
OMP_CLAUSE_LASTPRIVATE);
gimple_seq_add_stmt (dlist,
gimple_build_assign (t, z));
gcc_assert (DECL_HAS_VALUE_EXPR_P (v));
tree civar = DECL_VALUE_EXPR (v);
gcc_assert (TREE_CODE (civar) == ARRAY_REF);
civar = unshare_expr (civar);
TREE_OPERAND (civar, 1) = sctx.idx;
x = build2 (MODIFY_EXPR, TREE_TYPE (t), t,
unshare_expr (civar));
x = build2 (COMPOUND_EXPR, TREE_TYPE (orig_v), x,
build2 (MODIFY_EXPR, TREE_TYPE (orig_v),
orig_v, unshare_expr (ivar)));
tree cond = build2 (LT_EXPR, boolean_type_node, t,
civar);
x = build3 (COND_EXPR, void_type_node, cond, x,
void_node);
gimple_seq tseq = NULL;
gimplify_and_add (x, &tseq);
if (ctx->outer)
lower_omp (&tseq, ctx->outer);
gimple_seq_add_seq (&llist[1], tseq);
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& ctx->for_simd_scan_phase)
{
x = unshare_expr (ivar);
tree orig_v
= build_outer_var_ref (var, ctx,
OMP_CLAUSE_LASTPRIVATE);
x = lang_hooks.decls.omp_clause_assign_op (c, x,
orig_v);
gimplify_and_add (x, &llist[0]);
}
if (y)
{
y = lang_hooks.decls.omp_clause_dtor (c, ivar);
if (y)
gimplify_and_add (y, &llist[1]);
}
break;
}
if (omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
tree new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
tree type = TREE_TYPE (TREE_TYPE (new_vard));
x = TYPE_SIZE_UNIT (type);
if (TREE_CONSTANT (x))
{
x = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (x);
TREE_ADDRESSABLE (x) = 1;
x = build_fold_addr_expr_loc (clause_loc, x);
x = fold_convert_loc (clause_loc,
TREE_TYPE (new_vard), x);
gimplify_assign (new_vard, x, ilist);
}
}
}
if (nx)
gimplify_and_add (nx, ilist);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& is_simd
&& ctx->for_simd_scan_phase)
{
tree orig_v = build_outer_var_ref (var, ctx,
OMP_CLAUSE_LASTPRIVATE);
x = lang_hooks.decls.omp_clause_assign_op (c, new_var,
orig_v);
gimplify_and_add (x, ilist);
}
/* FALLTHRU */
do_dtor:
x = lang_hooks.decls.omp_clause_dtor (c, new_var);
if (x)
gimplify_and_add (x, dlist);
break;
case OMP_CLAUSE_LINEAR:
if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
goto do_firstprivate;
if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
x = NULL;
else
x = build_outer_var_ref (var, ctx);
goto do_private;
case OMP_CLAUSE_FIRSTPRIVATE:
if (is_task_ctx (ctx))
{
if ((omp_is_reference (var)
&& !OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE (c))
|| is_variable_sized (var))
goto do_dtor;
else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
ctx))
|| use_pointer_for_field (var, NULL))
{
x = build_receiver_ref (var, false, ctx);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
goto do_dtor;
}
}
if (OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE (c)
&& omp_is_reference (var))
{
x = build_outer_var_ref (var, ctx);
gcc_assert (TREE_CODE (x) == MEM_REF
&& integer_zerop (TREE_OPERAND (x, 1)));
x = TREE_OPERAND (x, 0);
x = lang_hooks.decls.omp_clause_copy_ctor
(c, unshare_expr (new_var), x);
gimplify_and_add (x, ilist);
goto do_dtor;
}
do_firstprivate:
x = build_outer_var_ref (var, ctx);
if (is_simd)
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& gimple_omp_for_combined_into_p (ctx->stmt))
{
tree t = OMP_CLAUSE_LINEAR_STEP (c);
tree stept = TREE_TYPE (t);
tree ct = omp_find_clause (clauses,
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (ct);
tree l = OMP_CLAUSE_DECL (ct);
tree n1 = fd->loop.n1;
tree step = fd->loop.step;
tree itype = TREE_TYPE (l);
if (POINTER_TYPE_P (itype))
itype = signed_type_for (itype);
l = fold_build2 (MINUS_EXPR, itype, l, n1);
if (TYPE_UNSIGNED (itype)
&& fd->loop.cond_code == GT_EXPR)
l = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, l),
fold_build1 (NEGATE_EXPR,
itype, step));
else
l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
t = fold_build2 (MULT_EXPR, stept,
fold_convert (stept, l), t);
if (OMP_CLAUSE_LINEAR_ARRAY (c))
{
if (omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
tree new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
tree type = TREE_TYPE (TREE_TYPE (new_vard));
nx = TYPE_SIZE_UNIT (type);
if (TREE_CONSTANT (nx))
{
nx = create_tmp_var_raw (type,
get_name (var));
gimple_add_tmp_var (nx);
TREE_ADDRESSABLE (nx) = 1;
nx = build_fold_addr_expr_loc (clause_loc,
nx);
nx = fold_convert_loc (clause_loc,
TREE_TYPE (new_vard),
nx);
gimplify_assign (new_vard, nx, ilist);
}
}
x = lang_hooks.decls.omp_clause_linear_ctor
(c, new_var, x, t);
gimplify_and_add (x, ilist);
goto do_dtor;
}
if (POINTER_TYPE_P (TREE_TYPE (x)))
x = fold_build2 (POINTER_PLUS_EXPR,
TREE_TYPE (x), x, t);
else
x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
}
if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
|| TREE_ADDRESSABLE (new_var)
|| omp_is_reference (var))
&& lower_rec_simd_input_clauses (new_var, ctx, &sctx,
ivar, lvar))
{
if (omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
tree new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (lvar));
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
{
tree iv = create_tmp_var (TREE_TYPE (new_var));
x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
gimplify_and_add (x, ilist);
gimple_stmt_iterator gsi
= gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
gassign *g
= gimple_build_assign (unshare_expr (lvar), iv);
gsi_insert_before_without_update (&gsi, g,
GSI_SAME_STMT);
tree t = OMP_CLAUSE_LINEAR_STEP (c);
enum tree_code code = PLUS_EXPR;
if (POINTER_TYPE_P (TREE_TYPE (new_var)))
code = POINTER_PLUS_EXPR;
g = gimple_build_assign (iv, code, iv, t);
gsi_insert_before_without_update (&gsi, g,
GSI_SAME_STMT);
break;
}
x = lang_hooks.decls.omp_clause_copy_ctor
(c, unshare_expr (ivar), x);
gimplify_and_add (x, &llist[0]);
x = lang_hooks.decls.omp_clause_dtor (c, ivar);
if (x)
gimplify_and_add (x, &llist[1]);
break;
}
if (omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
tree new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
tree type = TREE_TYPE (TREE_TYPE (new_vard));
nx = TYPE_SIZE_UNIT (type);
if (TREE_CONSTANT (nx))
{
nx = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (nx);
TREE_ADDRESSABLE (nx) = 1;
nx = build_fold_addr_expr_loc (clause_loc, nx);
nx = fold_convert_loc (clause_loc,
TREE_TYPE (new_vard), nx);
gimplify_assign (new_vard, nx, ilist);
}
}
}
x = lang_hooks.decls.omp_clause_copy_ctor
(c, unshare_expr (new_var), x);
gimplify_and_add (x, ilist);
goto do_dtor;
case OMP_CLAUSE__LOOPTEMP_:
case OMP_CLAUSE__REDUCTEMP_:
gcc_assert (is_taskreg_ctx (ctx));
x = build_outer_var_ref (var, ctx);
x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
gimplify_and_add (x, ilist);
break;
case OMP_CLAUSE_COPYIN:
by_ref = use_pointer_for_field (var, NULL);
x = build_receiver_ref (var, by_ref, ctx);
x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
append_to_statement_list (x, &copyin_seq);
copyin_by_ref |= by_ref;
break;
case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_IN_REDUCTION:
/* OpenACC reductions are initialized using the
GOACC_REDUCTION internal function. */
if (is_gimple_omp_oacc (ctx->stmt))
break;
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
gimple *tseq;
tree ptype = TREE_TYPE (placeholder);
if (cond)
{
x = error_mark_node;
if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c)
&& !task_reduction_needs_orig_p)
x = var;
else if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
{
tree pptype = build_pointer_type (ptype);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
x = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
size_int (task_reduction_cnt_full
+ task_reduction_cntorig - 1),
NULL_TREE, NULL_TREE);
else
{
unsigned int idx
= *ctx->task_reduction_map->get (c);
x = task_reduction_read (ilist, tskred_temp,
pptype, 7 + 3 * idx);
}
x = fold_convert (pptype, x);
x = build_simple_mem_ref (x);
}
}
else
{
x = build_outer_var_ref (var, ctx);
if (omp_is_reference (var)
&& !useless_type_conversion_p (ptype, TREE_TYPE (x)))
x = build_fold_addr_expr_loc (clause_loc, x);
}
SET_DECL_VALUE_EXPR (placeholder, x);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
tree new_vard = new_var;
if (omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
}
tree rvar = NULL_TREE, *rvarp = NULL, rvar2 = NULL_TREE;
if (is_simd
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_INSCAN (c))
rvarp = &rvar;
if (is_simd
&& lower_rec_simd_input_clauses (new_var, ctx, &sctx,
ivar, lvar, rvarp,
&rvar2))
{
if (new_vard == new_var)
{
gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
SET_DECL_VALUE_EXPR (new_var, ivar);
}
else
{
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (ivar));
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
}
x = lang_hooks.decls.omp_clause_default_ctor
(c, unshare_expr (ivar),
build_outer_var_ref (var, ctx));
if (rvarp && ctx->for_simd_scan_phase)
{
if (x)
gimplify_and_add (x, &llist[0]);
x = lang_hooks.decls.omp_clause_dtor (c, ivar);
if (x)
gimplify_and_add (x, &llist[1]);
break;
}
else if (rvarp)
{
if (x)
{
gimplify_and_add (x, &llist[0]);
tree ivar2 = unshare_expr (lvar);
TREE_OPERAND (ivar2, 1) = sctx.idx;
x = lang_hooks.decls.omp_clause_default_ctor
(c, ivar2, build_outer_var_ref (var, ctx));
gimplify_and_add (x, &llist[0]);
if (rvar2)
{
x = lang_hooks.decls.omp_clause_default_ctor
(c, unshare_expr (rvar2),
build_outer_var_ref (var, ctx));
gimplify_and_add (x, &llist[0]);
}
/* For types that need construction, add another
private var which will be default constructed
and optionally initialized with
OMP_CLAUSE_REDUCTION_GIMPLE_INIT; in the
loop we then assign from this value instead of
constructing and destructing a fresh object in
each iteration. */
tree nv = create_tmp_var_raw (TREE_TYPE (ivar));
gimple_add_tmp_var (nv);
ctx->cb.decl_map->put (TREE_OPERAND (rvar2
? rvar2
: ivar, 0),
nv);
x = lang_hooks.decls.omp_clause_default_ctor
(c, nv, build_outer_var_ref (var, ctx));
gimplify_and_add (x, ilist);
if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
x = DECL_VALUE_EXPR (new_vard);
tree vexpr = nv;
if (new_vard != new_var)
vexpr = build_fold_addr_expr (nv);
SET_DECL_VALUE_EXPR (new_vard, vexpr);
lower_omp (&tseq, ctx);
SET_DECL_VALUE_EXPR (new_vard, x);
gimple_seq_add_seq (ilist, tseq);
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
}
x = lang_hooks.decls.omp_clause_dtor (c, nv);
if (x)
gimplify_and_add (x, dlist);
}
tree ref = build_outer_var_ref (var, ctx);
x = unshare_expr (ivar);
x = lang_hooks.decls.omp_clause_assign_op (c, x,
ref);
gimplify_and_add (x, &llist[0]);
ref = build_outer_var_ref (var, ctx);
x = lang_hooks.decls.omp_clause_assign_op (c, ref,
rvar);
gimplify_and_add (x, &llist[3]);
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
if (new_vard == new_var)
SET_DECL_VALUE_EXPR (new_var, lvar);
else
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (lvar));
x = lang_hooks.decls.omp_clause_dtor (c, ivar);
if (x)
gimplify_and_add (x, &llist[1]);
tree ivar2 = unshare_expr (lvar);
TREE_OPERAND (ivar2, 1) = sctx.idx;
x = lang_hooks.decls.omp_clause_dtor (c, ivar2);
if (x)
gimplify_and_add (x, &llist[1]);
if (rvar2)
{
x = lang_hooks.decls.omp_clause_dtor (c, rvar2);
if (x)
gimplify_and_add (x, &llist[1]);
}
break;
}
if (x)
gimplify_and_add (x, &llist[0]);
if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (&llist[0], tseq);
}
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (&llist[1], tseq);
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
if (new_vard == new_var)
SET_DECL_VALUE_EXPR (new_var, lvar);
else
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (lvar));
x = lang_hooks.decls.omp_clause_dtor (c, ivar);
if (x)
gimplify_and_add (x, &llist[1]);
break;
}
/* If this is a reference to a constant-size reduction var
with a placeholder, we haven't emitted the initializer
for it yet, because doing so is undesirable if SIMD
arrays are used. But if they aren't used, we need to
emit the deferred initialization now. */
else if (omp_is_reference (var) && is_simd)
handle_simd_reference (clause_loc, new_vard, ilist);
tree lab2 = NULL_TREE;
if (cond)
{
gimple *g;
if (!is_parallel_ctx (ctx))
{
tree condv = create_tmp_var (boolean_type_node);
tree m = build_simple_mem_ref (cond);
g = gimple_build_assign (condv, m);
gimple_seq_add_stmt (ilist, g);
tree lab1
= create_artificial_label (UNKNOWN_LOCATION);
lab2 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (NE_EXPR, condv,
boolean_false_node,
lab2, lab1);
gimple_seq_add_stmt (ilist, g);
gimple_seq_add_stmt (ilist,
gimple_build_label (lab1));
}
g = gimple_build_assign (build_simple_mem_ref (cond),
boolean_true_node);
gimple_seq_add_stmt (ilist, g);
}
x = lang_hooks.decls.omp_clause_default_ctor
(c, unshare_expr (new_var),
cond ? NULL_TREE
: build_outer_var_ref (var, ctx));
if (x)
gimplify_and_add (x, ilist);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_INSCAN (c))
{
if (ctx->for_simd_scan_phase)
goto do_dtor;
if (x || (!is_simd
&& OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c)))
{
tree nv = create_tmp_var_raw (TREE_TYPE (new_var));
gimple_add_tmp_var (nv);
ctx->cb.decl_map->put (new_vard, nv);
x = lang_hooks.decls.omp_clause_default_ctor
(c, nv, build_outer_var_ref (var, ctx));
if (x)
gimplify_and_add (x, ilist);
if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
tree vexpr = nv;
if (new_vard != new_var)
vexpr = build_fold_addr_expr (nv);
SET_DECL_VALUE_EXPR (new_vard, vexpr);
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
lower_omp (&tseq, ctx);
SET_DECL_VALUE_EXPR (new_vard, NULL_TREE);
DECL_HAS_VALUE_EXPR_P (new_vard) = 0;
gimple_seq_add_seq (ilist, tseq);
}
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
if (is_simd && ctx->scan_exclusive)
{
tree nv2
= create_tmp_var_raw (TREE_TYPE (new_var));
gimple_add_tmp_var (nv2);
ctx->cb.decl_map->put (nv, nv2);
x = lang_hooks.decls.omp_clause_default_ctor
(c, nv2, build_outer_var_ref (var, ctx));
gimplify_and_add (x, ilist);
x = lang_hooks.decls.omp_clause_dtor (c, nv2);
if (x)
gimplify_and_add (x, dlist);
}
x = lang_hooks.decls.omp_clause_dtor (c, nv);
if (x)
gimplify_and_add (x, dlist);
}
else if (is_simd
&& ctx->scan_exclusive
&& TREE_ADDRESSABLE (TREE_TYPE (new_var)))
{
tree nv2 = create_tmp_var_raw (TREE_TYPE (new_var));
gimple_add_tmp_var (nv2);
ctx->cb.decl_map->put (new_vard, nv2);
x = lang_hooks.decls.omp_clause_dtor (c, nv2);
if (x)
gimplify_and_add (x, dlist);
}
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
goto do_dtor;
}
if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (ilist, tseq);
}
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
if (is_simd)
{
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (dlist, tseq);
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
}
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
if (cond)
{
if (lab2)
gimple_seq_add_stmt (ilist, gimple_build_label (lab2));
break;
}
goto do_dtor;
}
else
{
x = omp_reduction_init (c, TREE_TYPE (new_var));
gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
if (cond)
{
gimple *g;
tree lab2 = NULL_TREE;
/* GOMP_taskgroup_reduction_register memsets the whole
array to zero. If the initializer is zero, we don't
need to initialize it again, just mark it as ever
used unconditionally, i.e. cond = true. */
if (initializer_zerop (x))
{
g = gimple_build_assign (build_simple_mem_ref (cond),
boolean_true_node);
gimple_seq_add_stmt (ilist, g);
break;
}
/* Otherwise, emit
if (!cond) { cond = true; new_var = x; } */
if (!is_parallel_ctx (ctx))
{
tree condv = create_tmp_var (boolean_type_node);
tree m = build_simple_mem_ref (cond);
g = gimple_build_assign (condv, m);
gimple_seq_add_stmt (ilist, g);
tree lab1
= create_artificial_label (UNKNOWN_LOCATION);
lab2 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (NE_EXPR, condv,
boolean_false_node,
lab2, lab1);
gimple_seq_add_stmt (ilist, g);
gimple_seq_add_stmt (ilist,
gimple_build_label (lab1));
}
g = gimple_build_assign (build_simple_mem_ref (cond),
boolean_true_node);
gimple_seq_add_stmt (ilist, g);
gimplify_assign (new_var, x, ilist);
if (lab2)
gimple_seq_add_stmt (ilist, gimple_build_label (lab2));
break;
}
/* reduction(-:var) sums up the partial results, so it
acts identically to reduction(+:var). */
if (code == MINUS_EXPR)
code = PLUS_EXPR;
tree new_vard = new_var;
if (is_simd && omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
}
tree rvar = NULL_TREE, *rvarp = NULL, rvar2 = NULL_TREE;
if (is_simd
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_INSCAN (c))
rvarp = &rvar;
if (is_simd
&& lower_rec_simd_input_clauses (new_var, ctx, &sctx,
ivar, lvar, rvarp,
&rvar2))
{
if (new_vard != new_var)
{
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (lvar));
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
}
tree ref = build_outer_var_ref (var, ctx);
if (rvarp)
{
if (ctx->for_simd_scan_phase)
break;
gimplify_assign (ivar, ref, &llist[0]);
ref = build_outer_var_ref (var, ctx);
gimplify_assign (ref, rvar, &llist[3]);
break;
}
gimplify_assign (unshare_expr (ivar), x, &llist[0]);
if (sctx.is_simt)
{
if (!simt_lane)
simt_lane = create_tmp_var (unsigned_type_node);
x = build_call_expr_internal_loc
(UNKNOWN_LOCATION, IFN_GOMP_SIMT_XCHG_BFLY,
TREE_TYPE (ivar), 2, ivar, simt_lane);
x = build2 (code, TREE_TYPE (ivar), ivar, x);
gimplify_assign (ivar, x, &llist[2]);
}
x = build2 (code, TREE_TYPE (ref), ref, ivar);
ref = build_outer_var_ref (var, ctx);
gimplify_assign (ref, x, &llist[1]);
}
else
{
if (omp_is_reference (var) && is_simd)
handle_simd_reference (clause_loc, new_vard, ilist);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_INSCAN (c))
break;
gimplify_assign (new_var, x, ilist);
if (is_simd)
{
tree ref = build_outer_var_ref (var, ctx);
x = build2 (code, TREE_TYPE (ref), ref, new_var);
ref = build_outer_var_ref (var, ctx);
gimplify_assign (ref, x, dlist);
}
}
}
break;
default:
gcc_unreachable ();
}
}
}
if (tskred_avar)
{
tree clobber = build_clobber (TREE_TYPE (tskred_avar));
gimple_seq_add_stmt (ilist, gimple_build_assign (tskred_avar, clobber));
}
if (known_eq (sctx.max_vf, 1U))
{
sctx.is_simt = false;
if (ctx->lastprivate_conditional_map)
{
if (gimple_omp_for_combined_into_p (ctx->stmt))
{
/* Signal to lower_omp_1 that it should use parent context. */
ctx->combined_into_simd_safelen1 = true;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c))
{
tree o = lookup_decl (OMP_CLAUSE_DECL (c), ctx);
omp_context *outer = ctx->outer;
if (gimple_code (outer->stmt) == GIMPLE_OMP_SCAN)
outer = outer->outer;
tree *v = ctx->lastprivate_conditional_map->get (o);
tree po = lookup_decl (OMP_CLAUSE_DECL (c), outer);
tree *pv = outer->lastprivate_conditional_map->get (po);
*v = *pv;
}
}
else
{
/* When not vectorized, treat lastprivate(conditional:) like
normal lastprivate, as there will be just one simd lane
writing the privatized variable. */
delete ctx->lastprivate_conditional_map;
ctx->lastprivate_conditional_map = NULL;
}
}
}
if (nonconst_simd_if)
{
if (sctx.lane == NULL_TREE)
{
sctx.idx = create_tmp_var (unsigned_type_node);
sctx.lane = create_tmp_var (unsigned_type_node);
}
/* FIXME: For now. */
sctx.is_simt = false;
}
if (sctx.lane || sctx.is_simt)
{
uid = create_tmp_var (ptr_type_node, "simduid");
/* We don't want uninit warnings on simduid; it is always uninitialized,
but we use it not for the value, only for the DECL_UID. */
TREE_NO_WARNING (uid) = 1;
c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
OMP_CLAUSE__SIMDUID__DECL (c) = uid;
OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
gimple_omp_for_set_clauses (ctx->stmt, c);
}
/* Emit calls denoting privatized variables and initializing a pointer to
structure that holds private variables as fields after ompdevlow pass. */
if (sctx.is_simt)
{
sctx.simt_eargs[0] = uid;
gimple *g
= gimple_build_call_internal_vec (IFN_GOMP_SIMT_ENTER, sctx.simt_eargs);
gimple_call_set_lhs (g, uid);
gimple_seq_add_stmt (ilist, g);
sctx.simt_eargs.release ();
simtrec = create_tmp_var (ptr_type_node, ".omp_simt");
g = gimple_build_call_internal (IFN_GOMP_SIMT_ENTER_ALLOC, 1, uid);
gimple_call_set_lhs (g, simtrec);
gimple_seq_add_stmt (ilist, g);
}
if (sctx.lane)
{
gimple *g = gimple_build_call_internal (IFN_GOMP_SIMD_LANE,
2 + (nonconst_simd_if != NULL),
uid, integer_zero_node,
nonconst_simd_if);
gimple_call_set_lhs (g, sctx.lane);
gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
g = gimple_build_assign (sctx.lane, INTEGER_CST,
build_int_cst (unsigned_type_node, 0));
gimple_seq_add_stmt (ilist, g);
if (sctx.lastlane)
{
g = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
2, uid, sctx.lane);
gimple_call_set_lhs (g, sctx.lastlane);
gimple_seq_add_stmt (dlist, g);
gimple_seq_add_seq (dlist, llist[3]);
}
/* Emit reductions across SIMT lanes in log_2(simt_vf) steps. */
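/* Documentation-only sketch of the loop built below (assuming the
   per-lane reduction statements were queued in llist[2] above):

     simt_vf = GOMP_SIMT_VF ();
     simt_lane = 1;
     goto header;
     body:
       <llist[2]>   // ivar = ivar OP GOMP_SIMT_XCHG_BFLY (ivar, simt_lane)
       simt_lane = simt_lane << 1;
     header:
       if (simt_lane < simt_vf) goto body; else goto end;
     end:
*/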
if (llist[2])
{
tree simt_vf = create_tmp_var (unsigned_type_node);
g = gimple_build_call_internal (IFN_GOMP_SIMT_VF, 0);
gimple_call_set_lhs (g, simt_vf);
gimple_seq_add_stmt (dlist, g);
tree t = build_int_cst (unsigned_type_node, 1);
g = gimple_build_assign (simt_lane, INTEGER_CST, t);
gimple_seq_add_stmt (dlist, g);
t = build_int_cst (unsigned_type_node, 0);
g = gimple_build_assign (sctx.idx, INTEGER_CST, t);
gimple_seq_add_stmt (dlist, g);
tree body = create_artificial_label (UNKNOWN_LOCATION);
tree header = create_artificial_label (UNKNOWN_LOCATION);
tree end = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (dlist, gimple_build_goto (header));
gimple_seq_add_stmt (dlist, gimple_build_label (body));
gimple_seq_add_seq (dlist, llist[2]);
g = gimple_build_assign (simt_lane, LSHIFT_EXPR, simt_lane, integer_one_node);
gimple_seq_add_stmt (dlist, g);
gimple_seq_add_stmt (dlist, gimple_build_label (header));
g = gimple_build_cond (LT_EXPR, simt_lane, simt_vf, body, end);
gimple_seq_add_stmt (dlist, g);
gimple_seq_add_stmt (dlist, gimple_build_label (end));
}
for (int i = 0; i < 2; i++)
if (llist[i])
{
tree vf = create_tmp_var (unsigned_type_node);
g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
gimple_call_set_lhs (g, vf);
gimple_seq *seq = i == 0 ? ilist : dlist;
gimple_seq_add_stmt (seq, g);
tree t = build_int_cst (unsigned_type_node, 0);
g = gimple_build_assign (sctx.idx, INTEGER_CST, t);
gimple_seq_add_stmt (seq, g);
tree body = create_artificial_label (UNKNOWN_LOCATION);
tree header = create_artificial_label (UNKNOWN_LOCATION);
tree end = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (seq, gimple_build_goto (header));
gimple_seq_add_stmt (seq, gimple_build_label (body));
gimple_seq_add_seq (seq, llist[i]);
t = build_int_cst (unsigned_type_node, 1);
g = gimple_build_assign (sctx.idx, PLUS_EXPR, sctx.idx, t);
gimple_seq_add_stmt (seq, g);
gimple_seq_add_stmt (seq, gimple_build_label (header));
g = gimple_build_cond (LT_EXPR, sctx.idx, vf, body, end);
gimple_seq_add_stmt (seq, g);
gimple_seq_add_stmt (seq, gimple_build_label (end));
}
}
if (sctx.is_simt)
{
gimple_seq_add_seq (dlist, sctx.simt_dlist);
gimple *g
= gimple_build_call_internal (IFN_GOMP_SIMT_EXIT, 1, simtrec);
gimple_seq_add_stmt (dlist, g);
}
/* The copyin sequence is not to be executed by the main thread, since
that would result in self-copies. Perhaps not visible to scalars,
but it certainly is to C++ operator=. */
if (copyin_seq)
{
x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
0);
x = build2 (NE_EXPR, boolean_type_node, x,
build_int_cst (TREE_TYPE (x), 0));
x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
gimplify_and_add (x, ilist);
}
/* If any copyin variable is passed by reference, we must ensure the
master thread doesn't modify it before it is copied over in all
threads. Similarly for variables in both firstprivate and
lastprivate clauses we need to ensure the lastprivate copying
happens after firstprivate copying in all threads. And similarly
for UDRs if initializer expression refers to omp_orig. */
if (copyin_by_ref || lastprivate_firstprivate
|| (reduction_omp_orig_ref
&& !ctx->scan_inclusive
&& !ctx->scan_exclusive))
{
/* Don't add any barrier for #pragma omp simd or
#pragma omp distribute. */
if (!is_task_ctx (ctx)
&& (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
|| gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR))
gimple_seq_add_stmt (ilist, omp_build_barrier (NULL_TREE));
}
/* If max_vf is non-zero, then we can use only a vectorization factor
up to the max_vf we chose. So stick it into the safelen clause. */
if (maybe_ne (sctx.max_vf, 0U))
{
tree c = omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_SAFELEN);
poly_uint64 safe_len;
if (c == NULL_TREE
|| (poly_int_tree_p (OMP_CLAUSE_SAFELEN_EXPR (c), &safe_len)
&& maybe_gt (safe_len, sctx.max_vf)))
{
c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
sctx.max_vf);
OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
gimple_omp_for_set_clauses (ctx->stmt, c);
}
}
}
/* Create temporary variables for lastprivate(conditional:) implementation
in context CTX with CLAUSES. */
static void
lower_lastprivate_conditional_clauses (tree *clauses, omp_context *ctx)
{
tree iter_type = NULL_TREE;
tree cond_ptr = NULL_TREE;
tree iter_var = NULL_TREE;
bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD);
tree next = *clauses;
for (tree c = *clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c))
{
if (is_simd)
{
tree cc = omp_find_clause (next, OMP_CLAUSE__CONDTEMP_);
gcc_assert (cc);
if (iter_type == NULL_TREE)
{
iter_type = TREE_TYPE (OMP_CLAUSE_DECL (cc));
iter_var = create_tmp_var_raw (iter_type);
DECL_CONTEXT (iter_var) = current_function_decl;
DECL_SEEN_IN_BIND_EXPR_P (iter_var) = 1;
DECL_CHAIN (iter_var) = ctx->block_vars;
ctx->block_vars = iter_var;
tree c3
= build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__CONDTEMP_);
OMP_CLAUSE__CONDTEMP__ITER (c3) = 1;
OMP_CLAUSE_DECL (c3) = iter_var;
OMP_CLAUSE_CHAIN (c3) = *clauses;
*clauses = c3;
ctx->lastprivate_conditional_map = new hash_map<tree, tree>;
}
next = OMP_CLAUSE_CHAIN (cc);
tree o = lookup_decl (OMP_CLAUSE_DECL (c), ctx);
tree v = lookup_decl (OMP_CLAUSE_DECL (cc), ctx);
ctx->lastprivate_conditional_map->put (o, v);
continue;
}
if (iter_type == NULL)
{
if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR)
{
struct omp_for_data fd;
omp_extract_for_data (as_a <gomp_for *> (ctx->stmt), &fd,
NULL);
iter_type = unsigned_type_for (fd.iter_type);
}
else if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
iter_type = unsigned_type_node;
tree c2 = omp_find_clause (*clauses, OMP_CLAUSE__CONDTEMP_);
if (c2)
{
cond_ptr
= lookup_decl_in_outer_ctx (OMP_CLAUSE_DECL (c2), ctx);
OMP_CLAUSE_DECL (c2) = cond_ptr;
}
else
{
cond_ptr = create_tmp_var_raw (build_pointer_type (iter_type));
DECL_CONTEXT (cond_ptr) = current_function_decl;
DECL_SEEN_IN_BIND_EXPR_P (cond_ptr) = 1;
DECL_CHAIN (cond_ptr) = ctx->block_vars;
ctx->block_vars = cond_ptr;
c2 = build_omp_clause (UNKNOWN_LOCATION,
OMP_CLAUSE__CONDTEMP_);
OMP_CLAUSE_DECL (c2) = cond_ptr;
OMP_CLAUSE_CHAIN (c2) = *clauses;
*clauses = c2;
}
iter_var = create_tmp_var_raw (iter_type);
DECL_CONTEXT (iter_var) = current_function_decl;
DECL_SEEN_IN_BIND_EXPR_P (iter_var) = 1;
DECL_CHAIN (iter_var) = ctx->block_vars;
ctx->block_vars = iter_var;
tree c3
= build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__CONDTEMP_);
OMP_CLAUSE__CONDTEMP__ITER (c3) = 1;
OMP_CLAUSE_DECL (c3) = iter_var;
OMP_CLAUSE_CHAIN (c3) = OMP_CLAUSE_CHAIN (c2);
OMP_CLAUSE_CHAIN (c2) = c3;
ctx->lastprivate_conditional_map = new hash_map<tree, tree>;
}
tree v = create_tmp_var_raw (iter_type);
DECL_CONTEXT (v) = current_function_decl;
DECL_SEEN_IN_BIND_EXPR_P (v) = 1;
DECL_CHAIN (v) = ctx->block_vars;
ctx->block_vars = v;
tree o = lookup_decl (OMP_CLAUSE_DECL (c), ctx);
ctx->lastprivate_conditional_map->put (o, v);
}
}
/* Generate code to implement the LASTPRIVATE clauses. This is used for
both parallel and workshare constructs. PREDICATE may be NULL if it's
always true. BODY_P is the sequence into which early initialization
is inserted if needed, STMT_LIST is where the non-conditional
lastprivate handling goes, and CSTMT_LIST is a sequence that needs to
be run in a critical section. */
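/* Illustrative sketch (documentation only): for

     #pragma omp parallel for lastprivate(x)
     for (i = 0; i < n; i++)
       x = a[i];

   the code built below conceptually appends

     if (<this thread ran the sequentially last iteration>)  // PREDICATE
       x = x_priv;   // copy the private value back to the original

   using the label/cond machinery emitted at the top of this
   function.  */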
static void
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *body_p,
gimple_seq *stmt_list, gimple_seq *cstmt_list,
omp_context *ctx)
{
tree x, c, label = NULL, orig_clauses = clauses;
bool par_clauses = false;
tree simduid = NULL, lastlane = NULL, simtcond = NULL, simtlast = NULL;
unsigned HOST_WIDE_INT conditional_off = 0;
gimple_seq post_stmt_list = NULL;
/* Early exit if there are no lastprivate or linear clauses. */
for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
|| (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
&& !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
break;
if (clauses == NULL)
{
/* If this was a workshare clause, see if it had been combined
with its parallel. In that case, look for the clauses on the
parallel statement itself. */
if (is_parallel_ctx (ctx))
return;
ctx = ctx->outer;
if (ctx == NULL || !is_parallel_ctx (ctx))
return;
clauses = omp_find_clause (gimple_omp_parallel_clauses (ctx->stmt),
OMP_CLAUSE_LASTPRIVATE);
if (clauses == NULL)
return;
par_clauses = true;
}
bool maybe_simt = false;
if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
{
maybe_simt = omp_find_clause (orig_clauses, OMP_CLAUSE__SIMT_);
simduid = omp_find_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
if (simduid)
simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
}
if (predicate)
{
gcond *stmt;
tree label_true, arm1, arm2;
enum tree_code pred_code = TREE_CODE (predicate);
label = create_artificial_label (UNKNOWN_LOCATION);
label_true = create_artificial_label (UNKNOWN_LOCATION);
if (TREE_CODE_CLASS (pred_code) == tcc_comparison)
{
arm1 = TREE_OPERAND (predicate, 0);
arm2 = TREE_OPERAND (predicate, 1);
gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
}
else
{
arm1 = predicate;
gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
arm2 = boolean_false_node;
pred_code = NE_EXPR;
}
if (maybe_simt)
{
c = build2 (pred_code, boolean_type_node, arm1, arm2);
c = fold_convert (integer_type_node, c);
simtcond = create_tmp_var (integer_type_node);
gimplify_assign (simtcond, c, stmt_list);
gcall *g = gimple_build_call_internal (IFN_GOMP_SIMT_VOTE_ANY,
1, simtcond);
c = create_tmp_var (integer_type_node);
gimple_call_set_lhs (g, c);
gimple_seq_add_stmt (stmt_list, g);
stmt = gimple_build_cond (NE_EXPR, c, integer_zero_node,
label_true, label);
}
else
stmt = gimple_build_cond (pred_code, arm1, arm2, label_true, label);
gimple_seq_add_stmt (stmt_list, stmt);
gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
}
tree cond_ptr = NULL_TREE;
for (c = clauses; c ;)
{
tree var, new_var;
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
gimple_seq *this_stmt_list = stmt_list;
tree lab2 = NULL_TREE;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
&& ctx->lastprivate_conditional_map
&& !ctx->combined_into_simd_safelen1)
{
gcc_assert (body_p);
if (simduid)
goto next;
if (cond_ptr == NULL_TREE)
{
cond_ptr = omp_find_clause (orig_clauses, OMP_CLAUSE__CONDTEMP_);
cond_ptr = OMP_CLAUSE_DECL (cond_ptr);
}
tree type = TREE_TYPE (TREE_TYPE (cond_ptr));
tree o = lookup_decl (OMP_CLAUSE_DECL (c), ctx);
tree v = *ctx->lastprivate_conditional_map->get (o);
gimplify_assign (v, build_zero_cst (type), body_p);
this_stmt_list = cstmt_list;
tree mem;
if (POINTER_TYPE_P (TREE_TYPE (cond_ptr)))
{
mem = build2 (MEM_REF, type, cond_ptr,
build_int_cst (TREE_TYPE (cond_ptr),
conditional_off));
conditional_off += tree_to_uhwi (TYPE_SIZE_UNIT (type));
}
else
mem = build4 (ARRAY_REF, type, cond_ptr,
size_int (conditional_off++), NULL_TREE, NULL_TREE);
tree mem2 = copy_node (mem);
gimple_seq seq = NULL;
mem = force_gimple_operand (mem, &seq, true, NULL_TREE);
gimple_seq_add_seq (this_stmt_list, seq);
tree lab1 = create_artificial_label (UNKNOWN_LOCATION);
lab2 = create_artificial_label (UNKNOWN_LOCATION);
gimple *g = gimple_build_cond (GT_EXPR, v, mem, lab1, lab2);
gimple_seq_add_stmt (this_stmt_list, g);
gimple_seq_add_stmt (this_stmt_list, gimple_build_label (lab1));
gimplify_assign (mem2, v, this_stmt_list);
}
else if (predicate
&& ctx->combined_into_simd_safelen1
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
&& ctx->lastprivate_conditional_map)
this_stmt_list = &post_stmt_list;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
|| (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
{
var = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)
&& is_taskloop_ctx (ctx))
{
gcc_checking_assert (ctx->outer && is_task_ctx (ctx->outer));
new_var = lookup_decl (var, ctx->outer);
}
else
{
new_var = lookup_decl (var, ctx);
/* Avoid uninitialized warnings for lastprivate and
for linear iterators. */
if (predicate
&& (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
|| OMP_CLAUSE_LINEAR_NO_COPYIN (c)))
TREE_NO_WARNING (new_var) = 1;
}
if (!maybe_simt && simduid && DECL_HAS_VALUE_EXPR_P (new_var))
{
tree val = DECL_VALUE_EXPR (new_var);
if (TREE_CODE (val) == ARRAY_REF
&& VAR_P (TREE_OPERAND (val, 0))
&& lookup_attribute ("omp simd array",
DECL_ATTRIBUTES (TREE_OPERAND (val,
0))))
{
if (lastlane == NULL)
{
lastlane = create_tmp_var (unsigned_type_node);
gcall *g
= gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
2, simduid,
TREE_OPERAND (val, 1));
gimple_call_set_lhs (g, lastlane);
gimple_seq_add_stmt (this_stmt_list, g);
}
new_var = build4 (ARRAY_REF, TREE_TYPE (val),
TREE_OPERAND (val, 0), lastlane,
NULL_TREE, NULL_TREE);
TREE_THIS_NOTRAP (new_var) = 1;
}
}
else if (maybe_simt)
{
tree val = (DECL_HAS_VALUE_EXPR_P (new_var)
? DECL_VALUE_EXPR (new_var)
: new_var);
if (simtlast == NULL)
{
simtlast = create_tmp_var (unsigned_type_node);
gcall *g = gimple_build_call_internal
(IFN_GOMP_SIMT_LAST_LANE, 1, simtcond);
gimple_call_set_lhs (g, simtlast);
gimple_seq_add_stmt (this_stmt_list, g);
}
x = build_call_expr_internal_loc
(UNKNOWN_LOCATION, IFN_GOMP_SIMT_XCHG_IDX,
TREE_TYPE (val), 2, val, simtlast);
new_var = unshare_expr (new_var);
gimplify_assign (new_var, x, this_stmt_list);
new_var = unshare_expr (new_var);
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
{
lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
gimple_seq_add_seq (this_stmt_list,
OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
{
lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
gimple_seq_add_seq (this_stmt_list,
OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
}
x = NULL_TREE;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_LOOP_IV (c)
&& is_taskloop_ctx (ctx))
{
tree ovar = maybe_lookup_decl_in_outer_ctx (var,
ctx->outer->outer);
if (is_global_var (ovar))
x = ovar;
}
if (!x)
x = build_outer_var_ref (var, ctx, OMP_CLAUSE_LASTPRIVATE);
if (omp_is_reference (var))
new_var = build_simple_mem_ref_loc (clause_loc, new_var);
x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
gimplify_and_add (x, this_stmt_list);
if (lab2)
gimple_seq_add_stmt (this_stmt_list, gimple_build_label (lab2));
}
next:
c = OMP_CLAUSE_CHAIN (c);
if (c == NULL && !par_clauses)
{
/* If this was a workshare clause, see if it had been combined
with its parallel. In that case, continue looking for the
clauses also on the parallel statement itself. */
if (is_parallel_ctx (ctx))
break;
ctx = ctx->outer;
if (ctx == NULL || !is_parallel_ctx (ctx))
break;
c = omp_find_clause (gimple_omp_parallel_clauses (ctx->stmt),
OMP_CLAUSE_LASTPRIVATE);
par_clauses = true;
}
}
if (label)
gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
gimple_seq_add_seq (stmt_list, post_stmt_list);
}
/* Lower the OpenACC reductions of CLAUSES for compute axis LEVEL
(which might be a placeholder). INNER is true if this is an inner
axis of a multi-axis loop. FORK and JOIN are (optional) fork and
join markers. Generate the before-loop forking sequence in
FORK_SEQ and the after-loop joining sequence to JOIN_SEQ. The
general form of these sequences is
GOACC_REDUCTION_SETUP
GOACC_FORK
GOACC_REDUCTION_INIT
...
GOACC_REDUCTION_FINI
GOACC_JOIN
GOACC_REDUCTION_TEARDOWN. */
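/* Illustrative example (documentation only): for a user-level loop
   such as

     #pragma acc parallel loop gang reduction(+:sum)
     for (i = 0; i < n; i++)
       sum += a[i];

   each reduction variable is threaded through the IFN_GOACC_REDUCTION
   calls built below: SETUP lands before GOACC_FORK, INIT right after
   it, FINI before GOACC_JOIN and TEARDOWN after it, matching the
   sequence shown above.  */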
static void
lower_oacc_reductions (location_t loc, tree clauses, tree level, bool inner,
gcall *fork, gcall *join, gimple_seq *fork_seq,
gimple_seq *join_seq, omp_context *ctx)
{
gimple_seq before_fork = NULL;
gimple_seq after_fork = NULL;
gimple_seq before_join = NULL;
gimple_seq after_join = NULL;
tree init_code = NULL_TREE, fini_code = NULL_TREE,
setup_code = NULL_TREE, teardown_code = NULL_TREE;
unsigned offset = 0;
for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
{
/* No 'reduction' clauses on OpenACC 'kernels'. */
gcc_checking_assert (!is_oacc_kernels (ctx));
tree orig = OMP_CLAUSE_DECL (c);
tree var = maybe_lookup_decl (orig, ctx);
tree ref_to_res = NULL_TREE;
tree incoming, outgoing, v1, v2, v3;
bool is_private = false;
enum tree_code rcode = OMP_CLAUSE_REDUCTION_CODE (c);
if (rcode == MINUS_EXPR)
rcode = PLUS_EXPR;
else if (rcode == TRUTH_ANDIF_EXPR)
rcode = BIT_AND_EXPR;
else if (rcode == TRUTH_ORIF_EXPR)
rcode = BIT_IOR_EXPR;
tree op = build_int_cst (unsigned_type_node, rcode);
if (!var)
var = orig;
incoming = outgoing = var;
if (!inner)
{
/* See if an outer construct also reduces this variable. */
omp_context *outer = ctx;
while (omp_context *probe = outer->outer)
{
enum gimple_code type = gimple_code (probe->stmt);
tree cls;
switch (type)
{
case GIMPLE_OMP_FOR:
cls = gimple_omp_for_clauses (probe->stmt);
break;
case GIMPLE_OMP_TARGET:
/* No 'reduction' clauses inside OpenACC 'kernels'
regions. */
gcc_checking_assert (!is_oacc_kernels (probe));
if (!is_gimple_omp_offloaded (probe->stmt))
goto do_lookup;
cls = gimple_omp_target_clauses (probe->stmt);
break;
default:
goto do_lookup;
}
outer = probe;
for (; cls; cls = OMP_CLAUSE_CHAIN (cls))
if (OMP_CLAUSE_CODE (cls) == OMP_CLAUSE_REDUCTION
&& orig == OMP_CLAUSE_DECL (cls))
{
incoming = outgoing = lookup_decl (orig, probe);
goto has_outer_reduction;
}
else if ((OMP_CLAUSE_CODE (cls) == OMP_CLAUSE_FIRSTPRIVATE
|| OMP_CLAUSE_CODE (cls) == OMP_CLAUSE_PRIVATE)
&& orig == OMP_CLAUSE_DECL (cls))
{
is_private = true;
goto do_lookup;
}
}
do_lookup:
/* This is the outermost construct with this reduction;
see if there's a mapping for it. */
if (gimple_code (outer->stmt) == GIMPLE_OMP_TARGET
&& maybe_lookup_field (orig, outer) && !is_private)
{
ref_to_res = build_receiver_ref (orig, false, outer);
if (omp_is_reference (orig))
ref_to_res = build_simple_mem_ref (ref_to_res);
tree type = TREE_TYPE (var);
if (POINTER_TYPE_P (type))
type = TREE_TYPE (type);
outgoing = var;
incoming = omp_reduction_init_op (loc, rcode, type);
}
else
{
/* Try to look at enclosing contexts for reduction var,
use original if no mapping found. */
tree t = NULL_TREE;
omp_context *c = ctx->outer;
while (c && !t)
{
t = maybe_lookup_decl (orig, c);
c = c->outer;
}
incoming = outgoing = (t ? t : orig);
}
has_outer_reduction:;
}
if (!ref_to_res)
ref_to_res = integer_zero_node;
if (omp_is_reference (orig))
{
tree type = TREE_TYPE (var);
const char *id = IDENTIFIER_POINTER (DECL_NAME (var));
if (!inner)
{
tree x = create_tmp_var (TREE_TYPE (type), id);
gimplify_assign (var, build_fold_addr_expr (x), fork_seq);
}
v1 = create_tmp_var (type, id);
v2 = create_tmp_var (type, id);
v3 = create_tmp_var (type, id);
gimplify_assign (v1, var, fork_seq);
gimplify_assign (v2, var, fork_seq);
gimplify_assign (v3, var, fork_seq);
var = build_simple_mem_ref (var);
v1 = build_simple_mem_ref (v1);
v2 = build_simple_mem_ref (v2);
v3 = build_simple_mem_ref (v3);
outgoing = build_simple_mem_ref (outgoing);
if (!TREE_CONSTANT (incoming))
incoming = build_simple_mem_ref (incoming);
}
else
v1 = v2 = v3 = var;
/* Determine position in reduction buffer, which may be used
by target. The parser has ensured that this is not a
variable-sized type. */
fixed_size_mode mode
= as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (var)));
unsigned align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
offset = (offset + align - 1) & ~(align - 1);
tree off = build_int_cst (sizetype, offset);
offset += GET_MODE_SIZE (mode);
if (!init_code)
{
init_code = build_int_cst (integer_type_node,
IFN_GOACC_REDUCTION_INIT);
fini_code = build_int_cst (integer_type_node,
IFN_GOACC_REDUCTION_FINI);
setup_code = build_int_cst (integer_type_node,
IFN_GOACC_REDUCTION_SETUP);
teardown_code = build_int_cst (integer_type_node,
IFN_GOACC_REDUCTION_TEARDOWN);
}
tree setup_call
= build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
TREE_TYPE (var), 6, setup_code,
unshare_expr (ref_to_res),
incoming, level, op, off);
tree init_call
= build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
TREE_TYPE (var), 6, init_code,
unshare_expr (ref_to_res),
v1, level, op, off);
tree fini_call
= build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
TREE_TYPE (var), 6, fini_code,
unshare_expr (ref_to_res),
v2, level, op, off);
tree teardown_call
= build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
TREE_TYPE (var), 6, teardown_code,
ref_to_res, v3, level, op, off);
gimplify_assign (v1, setup_call, &before_fork);
gimplify_assign (v2, init_call, &after_fork);
gimplify_assign (v3, fini_call, &before_join);
gimplify_assign (outgoing, teardown_call, &after_join);
}
/* Now stitch things together. */
gimple_seq_add_seq (fork_seq, before_fork);
if (fork)
gimple_seq_add_stmt (fork_seq, fork);
gimple_seq_add_seq (fork_seq, after_fork);
gimple_seq_add_seq (join_seq, before_join);
if (join)
gimple_seq_add_stmt (join_seq, join);
gimple_seq_add_seq (join_seq, after_join);
}
/* Generate code to implement the REDUCTION clauses, append it
to STMT_SEQP. CLIST if non-NULL is a pointer to a sequence
that should be emitted also inside of the critical section,
in that case clear *CLIST afterwards, otherwise leave it as is
and let the caller emit it itself. */
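/* Illustrative sketch (documentation only): for a single scalar
   reduction such as

     #pragma omp parallel reduction(+:sum)
     { sum += work (); }

   the count == 1 fast path below emits one relaxed atomic update,
   conceptually

     #pragma omp atomic
     sum_orig += sum_priv;

   With two or more reduction clauses, or array/UDR reductions, the
   merges are instead wrapped in GOMP_atomic_start ()/GOMP_atomic_end ()
   at the end of this function.  */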
static void
lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp,
gimple_seq *clist, omp_context *ctx)
{
gimple_seq sub_seq = NULL;
gimple *stmt;
tree x, c;
int count = 0;
/* OpenACC loop reductions are handled elsewhere. */
if (is_gimple_omp_oacc (ctx->stmt))
return;
/* SIMD reductions are handled in lower_rec_input_clauses. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
return;
/* inscan reductions are handled elsewhere. */
if (ctx->scan_inclusive || ctx->scan_exclusive)
return;
/* First see if there is exactly one reduction clause. Use OMP_ATOMIC
update in that case, otherwise use a lock. */
for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& !OMP_CLAUSE_REDUCTION_TASK (c))
{
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
|| TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
{
/* Never use OMP_ATOMIC for array reductions or UDRs. */
count = -1;
break;
}
count++;
}
if (count == 0)
return;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
{
tree var, ref, new_var, orig_var;
enum tree_code code;
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
|| OMP_CLAUSE_REDUCTION_TASK (c))
continue;
enum omp_clause_code ccode = OMP_CLAUSE_REDUCTION;
orig_var = var = OMP_CLAUSE_DECL (c);
if (TREE_CODE (var) == MEM_REF)
{
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == POINTER_PLUS_EXPR)
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == ADDR_EXPR)
var = TREE_OPERAND (var, 0);
else
{
/* If this is a pointer- or reference-based array
section, the var could be private in the outer
context, e.g. on an orphaned loop construct. Pretend this
is the private variable's outer reference. */
ccode = OMP_CLAUSE_PRIVATE;
if (TREE_CODE (var) == INDIRECT_REF)
var = TREE_OPERAND (var, 0);
}
orig_var = var;
if (is_variable_sized (var))
{
gcc_assert (DECL_HAS_VALUE_EXPR_P (var));
var = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (var) == INDIRECT_REF);
var = TREE_OPERAND (var, 0);
gcc_assert (DECL_P (var));
}
}
new_var = lookup_decl (var, ctx);
if (var == OMP_CLAUSE_DECL (c) && omp_is_reference (var))
new_var = build_simple_mem_ref_loc (clause_loc, new_var);
ref = build_outer_var_ref (var, ctx, ccode);
code = OMP_CLAUSE_REDUCTION_CODE (c);
/* reduction(-:var) sums up the partial results, so it acts
identically to reduction(+:var). */
if (code == MINUS_EXPR)
code = PLUS_EXPR;
if (count == 1)
{
tree addr = build_fold_addr_expr_loc (clause_loc, ref);
addr = save_expr (addr);
ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
x = build2 (OMP_ATOMIC, void_type_node, addr, x);
OMP_ATOMIC_MEMORY_ORDER (x) = OMP_MEMORY_ORDER_RELAXED;
gimplify_and_add (x, stmt_seqp);
return;
}
else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
{
tree d = OMP_CLAUSE_DECL (c);
tree type = TREE_TYPE (d);
tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
tree i = create_tmp_var (TREE_TYPE (v));
tree ptype = build_pointer_type (TREE_TYPE (type));
tree bias = TREE_OPERAND (d, 1);
d = TREE_OPERAND (d, 0);
if (TREE_CODE (d) == POINTER_PLUS_EXPR)
{
tree b = TREE_OPERAND (d, 1);
b = maybe_lookup_decl (b, ctx);
if (b == NULL)
{
b = TREE_OPERAND (d, 1);
b = maybe_lookup_decl_in_outer_ctx (b, ctx);
}
if (integer_zerop (bias))
bias = b;
else
{
bias = fold_convert_loc (clause_loc, TREE_TYPE (b), bias);
bias = fold_build2_loc (clause_loc, PLUS_EXPR,
TREE_TYPE (b), b, bias);
}
d = TREE_OPERAND (d, 0);
}
/* For ref build_outer_var_ref already performs this, so
only new_var needs a dereference. */
if (TREE_CODE (d) == INDIRECT_REF)
{
new_var = build_simple_mem_ref_loc (clause_loc, new_var);
gcc_assert (omp_is_reference (var) && var == orig_var);
}
else if (TREE_CODE (d) == ADDR_EXPR)
{
if (orig_var == var)
{
new_var = build_fold_addr_expr (new_var);
ref = build_fold_addr_expr (ref);
}
}
else
{
gcc_assert (orig_var == var);
if (omp_is_reference (var))
ref = build_fold_addr_expr (ref);
}
if (DECL_P (v))
{
tree t = maybe_lookup_decl (v, ctx);
if (t)
v = t;
else
v = maybe_lookup_decl_in_outer_ctx (v, ctx);
gimplify_expr (&v, stmt_seqp, NULL, is_gimple_val, fb_rvalue);
}
if (!integer_zerop (bias))
{
bias = fold_convert_loc (clause_loc, sizetype, bias);
new_var = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
TREE_TYPE (new_var), new_var,
unshare_expr (bias));
ref = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
TREE_TYPE (ref), ref, bias);
}
new_var = fold_convert_loc (clause_loc, ptype, new_var);
ref = fold_convert_loc (clause_loc, ptype, ref);
tree m = create_tmp_var (ptype);
gimplify_assign (m, new_var, stmt_seqp);
new_var = m;
m = create_tmp_var (ptype);
gimplify_assign (m, ref, stmt_seqp);
ref = m;
gimplify_assign (i, build_int_cst (TREE_TYPE (v), 0), stmt_seqp);
tree body = create_artificial_label (UNKNOWN_LOCATION);
tree end = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (&sub_seq, gimple_build_label (body));
tree priv = build_simple_mem_ref_loc (clause_loc, new_var);
tree out = build_simple_mem_ref_loc (clause_loc, ref);
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
tree decl_placeholder
= OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c);
SET_DECL_VALUE_EXPR (placeholder, out);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
SET_DECL_VALUE_EXPR (decl_placeholder, priv);
DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 1;
lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
gimple_seq_add_seq (&sub_seq,
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = NULL;
}
else
{
x = build2 (code, TREE_TYPE (out), out, priv);
out = unshare_expr (out);
gimplify_assign (out, x, &sub_seq);
}
gimple *g = gimple_build_assign (new_var, POINTER_PLUS_EXPR, new_var,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (&sub_seq, g);
g = gimple_build_assign (ref, POINTER_PLUS_EXPR, ref,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (&sub_seq, g);
g = gimple_build_assign (i, PLUS_EXPR, i,
build_int_cst (TREE_TYPE (i), 1));
gimple_seq_add_stmt (&sub_seq, g);
g = gimple_build_cond (LE_EXPR, i, v, body, end);
gimple_seq_add_stmt (&sub_seq, g);
gimple_seq_add_stmt (&sub_seq, gimple_build_label (end));
}
else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
if (omp_is_reference (var)
&& !useless_type_conversion_p (TREE_TYPE (placeholder),
TREE_TYPE (ref)))
ref = build_fold_addr_expr_loc (clause_loc, ref);
SET_DECL_VALUE_EXPR (placeholder, ref);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
}
else
{
x = build2 (code, TREE_TYPE (ref), ref, new_var);
ref = build_outer_var_ref (var, ctx);
gimplify_assign (ref, x, &sub_seq);
}
}
stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
0);
gimple_seq_add_stmt (stmt_seqp, stmt);
gimple_seq_add_seq (stmt_seqp, sub_seq);
if (clist)
{
gimple_seq_add_seq (stmt_seqp, *clist);
*clist = NULL;
}
stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
0);
gimple_seq_add_stmt (stmt_seqp, stmt);
}
/* Generate code to implement the COPYPRIVATE clauses. */
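/* Illustrative sketch (documentation only): for

     #pragma omp single copyprivate(x)
       x = compute ();

   the executing thread stores x (or &x when passed by reference) into
   the broadcast record via SLIST, and every other thread copies it
   back out of the received record via RLIST; lower_omp_single_copy
   further down shows the surrounding control flow.  */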
static void
lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
omp_context *ctx)
{
tree c;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
{
tree var, new_var, ref, x;
bool by_ref;
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
continue;
var = OMP_CLAUSE_DECL (c);
by_ref = use_pointer_for_field (var, NULL);
ref = build_sender_ref (var, ctx);
x = new_var = lookup_decl_in_outer_ctx (var, ctx);
if (by_ref)
{
x = build_fold_addr_expr_loc (clause_loc, new_var);
x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
}
gimplify_assign (ref, x, slist);
ref = build_receiver_ref (var, false, ctx);
if (by_ref)
{
ref = fold_convert_loc (clause_loc,
build_pointer_type (TREE_TYPE (new_var)),
ref);
ref = build_fold_indirect_ref_loc (clause_loc, ref);
}
if (omp_is_reference (var))
{
ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
ref = build_simple_mem_ref_loc (clause_loc, ref);
new_var = build_simple_mem_ref_loc (clause_loc, new_var);
}
x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
gimplify_and_add (x, rlist);
}
}
/* Generate code to implement the clauses FIRSTPRIVATE, COPYIN, LASTPRIVATE,
and REDUCTION from the sender (aka parent) side. */
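/* Illustrative sketch (documentation only; the record name follows
   GCC's usual ".omp_data_o" convention): for

     int a = 42;
     #pragma omp parallel firstprivate(a)
       use (a);

   the sender side built here appends, conceptually,

     .omp_data_o.a = a;        // ILIST, before the region

   and for outgoing clauses such as lastprivate the reverse copy

     a = .omp_data_o.a;        // OLIST, after the region

   is emitted instead (or in addition, per DO_IN/DO_OUT below).  */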
static void
lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
omp_context *ctx)
{
tree c, t;
int ignored_looptemp = 0;
bool is_taskloop = false;
/* For taskloop, ignore first two _looptemp_ clauses, those are initialized
by GOMP_taskloop. */
if (is_task_ctx (ctx) && gimple_omp_task_taskloop_p (ctx->stmt))
{
ignored_looptemp = 2;
is_taskloop = true;
}
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
{
tree val, ref, x, var;
bool by_ref, do_in = false, do_out = false;
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_PRIVATE:
if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
break;
continue;
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_LASTPRIVATE:
case OMP_CLAUSE_IN_REDUCTION:
case OMP_CLAUSE__REDUCTEMP_:
break;
case OMP_CLAUSE_REDUCTION:
if (is_task_ctx (ctx) || OMP_CLAUSE_REDUCTION_TASK (c))
continue;
break;
case OMP_CLAUSE_SHARED:
if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
break;
continue;
case OMP_CLAUSE__LOOPTEMP_:
if (ignored_looptemp)
{
ignored_looptemp--;
continue;
}
break;
default:
continue;
}
val = OMP_CLAUSE_DECL (c);
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION)
&& TREE_CODE (val) == MEM_REF)
{
val = TREE_OPERAND (val, 0);
if (TREE_CODE (val) == POINTER_PLUS_EXPR)
val = TREE_OPERAND (val, 0);
if (TREE_CODE (val) == INDIRECT_REF
|| TREE_CODE (val) == ADDR_EXPR)
val = TREE_OPERAND (val, 0);
if (is_variable_sized (val))
continue;
}
/* For OMP_CLAUSE_SHARED_FIRSTPRIVATE, look beyond the
outer taskloop region. */
omp_context *ctx_for_o = ctx;
if (is_taskloop
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
&& OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
ctx_for_o = ctx->outer;
var = lookup_decl_in_outer_ctx (val, ctx_for_o);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
&& is_global_var (var)
&& (val == OMP_CLAUSE_DECL (c)
|| !is_task_ctx (ctx)
|| (TREE_CODE (TREE_TYPE (val)) != POINTER_TYPE
&& (TREE_CODE (TREE_TYPE (val)) != REFERENCE_TYPE
|| (TREE_CODE (TREE_TYPE (TREE_TYPE (val)))
!= POINTER_TYPE)))))
continue;
t = omp_member_access_dummy_var (var);
if (t)
{
var = DECL_VALUE_EXPR (var);
tree o = maybe_lookup_decl_in_outer_ctx (t, ctx_for_o);
if (o != t)
var = unshare_and_remap (var, t, o);
else
var = unshare_expr (var);
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
{
/* Handle taskloop firstprivate/lastprivate, where the
lastprivate on GIMPLE_OMP_TASK is represented as
OMP_CLAUSE_SHARED_FIRSTPRIVATE. */
tree f = lookup_sfield ((splay_tree_key) &DECL_UID (val), ctx);
x = omp_build_component_ref (ctx->sender_decl, f);
if (use_pointer_for_field (val, ctx))
var = build_fold_addr_expr (var);
gimplify_assign (x, var, ilist);
DECL_ABSTRACT_ORIGIN (f) = NULL;
continue;
}
if (((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_IN_REDUCTION)
|| val == OMP_CLAUSE_DECL (c))
&& is_variable_sized (val))
continue;
by_ref = use_pointer_for_field (val, NULL);
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_FIRSTPRIVATE:
if (OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c)
&& !by_ref
&& is_task_ctx (ctx))
TREE_NO_WARNING (var) = 1;
do_in = true;
break;
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE__LOOPTEMP_:
case OMP_CLAUSE__REDUCTEMP_:
do_in = true;
break;
case OMP_CLAUSE_LASTPRIVATE:
if (by_ref || omp_is_reference (val))
{
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
continue;
do_in = true;
}
else
{
do_out = true;
if (lang_hooks.decls.omp_private_outer_ref (val))
do_in = true;
}
break;
case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_IN_REDUCTION:
do_in = true;
if (val == OMP_CLAUSE_DECL (c))
{
if (is_task_ctx (ctx))
by_ref = use_pointer_for_field (val, ctx);
else
do_out = !(by_ref || omp_is_reference (val));
}
else
by_ref = TREE_CODE (TREE_TYPE (val)) == ARRAY_TYPE;
break;
default:
gcc_unreachable ();
}
if (do_in)
{
ref = build_sender_ref (val, ctx);
x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
gimplify_assign (ref, x, ilist);
if (is_task_ctx (ctx))
DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
}
if (do_out)
{
ref = build_sender_ref (val, ctx);
gimplify_assign (var, ref, olist);
}
}
}
/* Generate code to implement SHARED from the sender (aka parent)
side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
list things that got automatically shared. */
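/* Illustrative sketch (documentation only): the walk below goes over
   the record's fields rather than the clause list, so

     int s;              // implicitly shared, no explicit clause
     #pragma omp parallel
       use (s);

   still gets its field initialized in ILIST (by address when
   use_pointer_for_field, otherwise by value, with a copy-back in
   OLIST for writable variables).  */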
static void
lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
{
tree var, ovar, nvar, t, f, x, record_type;
if (ctx->record_type == NULL)
return;
record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
{
ovar = DECL_ABSTRACT_ORIGIN (f);
if (!ovar || TREE_CODE (ovar) == FIELD_DECL)
continue;
nvar = maybe_lookup_decl (ovar, ctx);
if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
continue;
/* If CTX is a nested parallel directive, find the immediately
enclosing parallel or workshare construct that contains a
mapping for OVAR. */
var = lookup_decl_in_outer_ctx (ovar, ctx);
t = omp_member_access_dummy_var (var);
if (t)
{
var = DECL_VALUE_EXPR (var);
tree o = maybe_lookup_decl_in_outer_ctx (t, ctx);
if (o != t)
var = unshare_and_remap (var, t, o);
else
var = unshare_expr (var);
}
if (use_pointer_for_field (ovar, ctx))
{
x = build_sender_ref (ovar, ctx);
if (TREE_CODE (TREE_TYPE (f)) == ARRAY_TYPE
&& TREE_TYPE (f) == TREE_TYPE (ovar))
{
gcc_assert (is_parallel_ctx (ctx)
&& DECL_ARTIFICIAL (ovar));
/* _condtemp_ clause. */
var = build_constructor (TREE_TYPE (x), NULL);
}
else
var = build_fold_addr_expr (var);
gimplify_assign (x, var, ilist);
}
else
{
x = build_sender_ref (ovar, ctx);
gimplify_assign (x, var, ilist);
if (!TREE_READONLY (var)
/* We don't need to receive a new reference to a result
or parm decl. In fact we may not store to it as we will
invalidate any pending RSO and generate wrong gimple
during inlining. */
&& !((TREE_CODE (var) == RESULT_DECL
|| TREE_CODE (var) == PARM_DECL)
&& DECL_BY_REFERENCE (var)))
{
x = build_sender_ref (ovar, ctx);
gimplify_assign (var, x, olist);
}
}
}
}
/* Emit an OpenACC head marker call, encapsulating the partitioning and
other information that must be processed by the target compiler.
Return the maximum number of dimensions the associated loop might
be partitioned over. */
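/* Illustrative sketch (documentation only): for

     #pragma acc loop gang vector

   inside an OpenACC parallel region, the call emitted below looks
   roughly like

     ddvar = .UNIQUE (OACC_HEAD_MARK, ddvar, 2, tag);

   with 2 partitioning levels (gang and vector) and tag carrying
   OLF_DIM_GANG | OLF_DIM_VECTOR | OLF_INDEPENDENT.  */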
static unsigned
lower_oacc_head_mark (location_t loc, tree ddvar, tree clauses,
gimple_seq *seq, omp_context *ctx)
{
unsigned levels = 0;
unsigned tag = 0;
tree gang_static = NULL_TREE;
auto_vec<tree, 5> args;
args.quick_push (build_int_cst
(integer_type_node, IFN_UNIQUE_OACC_HEAD_MARK));
args.quick_push (ddvar);
for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
{
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_GANG:
tag |= OLF_DIM_GANG;
gang_static = OMP_CLAUSE_GANG_STATIC_EXPR (c);
/* static:* is represented by -1, and we can ignore it, as
scheduling is always static. */
if (gang_static && integer_minus_onep (gang_static))
gang_static = NULL_TREE;
levels++;
break;
case OMP_CLAUSE_WORKER:
tag |= OLF_DIM_WORKER;
levels++;
break;
case OMP_CLAUSE_VECTOR:
tag |= OLF_DIM_VECTOR;
levels++;
break;
case OMP_CLAUSE_SEQ:
tag |= OLF_SEQ;
break;
case OMP_CLAUSE_AUTO:
tag |= OLF_AUTO;
break;
case OMP_CLAUSE_INDEPENDENT:
tag |= OLF_INDEPENDENT;
break;
case OMP_CLAUSE_TILE:
tag |= OLF_TILE;
break;
default:
continue;
}
}
if (gang_static)
{
if (DECL_P (gang_static))
gang_static = build_outer_var_ref (gang_static, ctx);
tag |= OLF_GANG_STATIC;
}
omp_context *tgt = enclosing_target_ctx (ctx);
if (!tgt || is_oacc_parallel_or_serial (tgt))
;
else if (is_oacc_kernels (tgt))
/* This loop handling is not used inside OpenACC 'kernels' regions. */
gcc_unreachable ();
else
gcc_unreachable ();
/* In a parallel region, loops are implicitly INDEPENDENT. */
if (!tgt || is_oacc_parallel_or_serial (tgt))
tag |= OLF_INDEPENDENT;
if (tag & OLF_TILE)
/* Tiling could use all 3 levels. */
levels = 3;
else
{
/* A loop lacking SEQ, GANG, WORKER and/or VECTOR could be AUTO.
Ensure at least one level, or 2 for possible auto
partitioning. */
bool maybe_auto = !(tag & (((GOMP_DIM_MASK (GOMP_DIM_MAX) - 1)
<< OLF_DIM_BASE) | OLF_SEQ));
if (levels < 1u + maybe_auto)
levels = 1u + maybe_auto;
}
args.quick_push (build_int_cst (integer_type_node, levels));
args.quick_push (build_int_cst (integer_type_node, tag));
if (gang_static)
args.quick_push (gang_static);
gcall *call = gimple_build_call_internal_vec (IFN_UNIQUE, args);
gimple_set_location (call, loc);
gimple_set_lhs (call, ddvar);
gimple_seq_add_stmt (seq, call);
return levels;
}
/* Emit an OpenACC loop head or tail marker to SEQ. TOFOLLOW, if
non-NULL, is the partitioning level of the enclosed region. */
static void
lower_oacc_loop_marker (location_t loc, tree ddvar, bool head,
tree tofollow, gimple_seq *seq)
{
int marker_kind = (head ? IFN_UNIQUE_OACC_HEAD_MARK
: IFN_UNIQUE_OACC_TAIL_MARK);
tree marker = build_int_cst (integer_type_node, marker_kind);
int nargs = 2 + (tofollow != NULL_TREE);
gcall *call = gimple_build_call_internal (IFN_UNIQUE, nargs,
marker, ddvar, tofollow);
gimple_set_location (call, loc);
gimple_set_lhs (call, ddvar);
gimple_seq_add_stmt (seq, call);
}
/* Generate the before and after OpenACC loop sequences. CLAUSES are
the loop clauses, from which we extract reductions. Initialize
HEAD and TAIL. */
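/* Illustrative sketch (documentation only, simplified): each
   iteration of the loop below appends one GOACC_FORK (wrapped in its
   reduction setup/init code) to HEAD and prepends the matching
   GOACC_JOIN (wrapped in fini/teardown) to TAIL, so for two levels
   the result nests as

     HEAD: fork(outer) ... fork(inner)
     <loop body>
     TAIL: join(inner) ... join(outer)

   with IFN_UNIQUE head/tail markers delimiting each level.  */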
static void
lower_oacc_head_tail (location_t loc, tree clauses,
gimple_seq *head, gimple_seq *tail, omp_context *ctx)
{
bool inner = false;
tree ddvar = create_tmp_var (integer_type_node, ".data_dep");
gimple_seq_add_stmt (head, gimple_build_assign (ddvar, integer_zero_node));
unsigned count = lower_oacc_head_mark (loc, ddvar, clauses, head, ctx);
tree fork_kind = build_int_cst (unsigned_type_node, IFN_UNIQUE_OACC_FORK);
tree join_kind = build_int_cst (unsigned_type_node, IFN_UNIQUE_OACC_JOIN);
gcc_assert (count);
for (unsigned done = 1; count; count--, done++)
{
gimple_seq fork_seq = NULL;
gimple_seq join_seq = NULL;
tree place = build_int_cst (integer_type_node, -1);
gcall *fork = gimple_build_call_internal (IFN_UNIQUE, 3,
fork_kind, ddvar, place);
gimple_set_location (fork, loc);
gimple_set_lhs (fork, ddvar);
gcall *join = gimple_build_call_internal (IFN_UNIQUE, 3,
join_kind, ddvar, place);
gimple_set_location (join, loc);
gimple_set_lhs (join, ddvar);
/* Mark the beginning of this level sequence. */
if (inner)
lower_oacc_loop_marker (loc, ddvar, true,
build_int_cst (integer_type_node, count),
&fork_seq);
lower_oacc_loop_marker (loc, ddvar, false,
build_int_cst (integer_type_node, done),
&join_seq);
lower_oacc_reductions (loc, clauses, place, inner,
fork, join, &fork_seq, &join_seq, ctx);
/* Append this level to head. */
gimple_seq_add_seq (head, fork_seq);
/* Prepend it to tail. */
gimple_seq_add_seq (&join_seq, *tail);
*tail = join_seq;
inner = true;
}
/* Mark the end of the sequence. */
lower_oacc_loop_marker (loc, ddvar, true, NULL_TREE, head);
lower_oacc_loop_marker (loc, ddvar, false, NULL_TREE, tail);
}
/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
catch handler and return it. This prevents programs from violating the
structured block semantics with throws. */
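/* Illustrative sketch (documentation only): the wrapper built below
   corresponds roughly to the C++ view

     try { BODY } catch (...) { terminate (); }

   where the handler is whatever eh_protect_cleanup_actions returns
   for the front end (std::terminate for C++), or __builtin_trap when
   no such hook exists.  */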
static gimple_seq
maybe_catch_exception (gimple_seq body)
{
gimple *g;
tree decl;
if (!flag_exceptions)
return body;
if (lang_hooks.eh_protect_cleanup_actions != NULL)
decl = lang_hooks.eh_protect_cleanup_actions ();
else
decl = builtin_decl_explicit (BUILT_IN_TRAP);
g = gimple_build_eh_must_not_throw (decl);
g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
GIMPLE_TRY_CATCH);
return gimple_seq_alloc_with_stmt (g);
}
/* Routines to lower OMP directives into OMP-GIMPLE. */
/* If ctx is a worksharing context inside of a cancellable parallel
region and it isn't nowait, add lhs to its GIMPLE_OMP_RETURN
and conditional branch to parallel's cancel_label to handle
cancellation in the implicit barrier. */
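/* Illustrative sketch (documentation only): for a cancellable region
   the implicit barrier's GIMPLE_OMP_RETURN receives a boolean LHS and
   is followed, conceptually, by

     lhs = <implicit barrier, true if the parallel was cancelled>;
     if (lhs != false) goto <parallel's cancel_label>;
     fallthru_label:
*/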
static void
maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple *omp_return,
gimple_seq *body)
{
gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
if (gimple_omp_return_nowait_p (omp_return))
return;
for (omp_context *outer = ctx->outer; outer; outer = outer->outer)
if (gimple_code (outer->stmt) == GIMPLE_OMP_PARALLEL
&& outer->cancellable)
{
tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
tree lhs = create_tmp_var (c_bool_type);
gimple_omp_return_set_lhs (omp_return, lhs);
tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
gimple *g = gimple_build_cond (NE_EXPR, lhs,
fold_convert (c_bool_type,
boolean_false_node),
outer->cancel_label, fallthru_label);
gimple_seq_add_stmt (body, g);
gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
}
else if (gimple_code (outer->stmt) != GIMPLE_OMP_TASKGROUP)
return;
}
/* Find the first task_reduction or reduction clause or return NULL
if there are none. */
static inline tree
omp_task_reductions_find_first (tree clauses, enum tree_code code,
enum omp_clause_code ccode)
{
while (1)
{
clauses = omp_find_clause (clauses, ccode);
if (clauses == NULL_TREE)
return NULL_TREE;
if (ccode != OMP_CLAUSE_REDUCTION
|| code == OMP_TASKLOOP
|| OMP_CLAUSE_REDUCTION_TASK (clauses))
return clauses;
clauses = OMP_CLAUSE_CHAIN (clauses);
}
}
static void lower_omp_task_reductions (omp_context *, enum tree_code, tree,
gimple_seq *, gimple_seq *);
/* Lower the OpenMP sections directive in the current statement in GSI_P.
CTX is the enclosing OMP context for the current statement. */
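/* Illustrative sketch (documentation only): for

     #pragma omp sections
     {
       #pragma omp section
       f ();
       #pragma omp section
       g ();
     }

   the body assembled at the end of this function is roughly

     <ilist: privatization/reduction setup>
     GIMPLE_OMP_SECTIONS <.section control variable>
     GIMPLE_OMP_SECTIONS_SWITCH
     bind { lowered section bodies, each ending in GIMPLE_OMP_RETURN }
     GIMPLE_OMP_CONTINUE (.section, .section)
     <olist, dlist>
     GIMPLE_OMP_RETURN (nowait?)
*/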
static void
lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree block, control;
gimple_stmt_iterator tgsi;
gomp_sections *stmt;
gimple *t;
gbind *new_stmt, *bind;
gimple_seq ilist, dlist, olist, tred_dlist = NULL, clist = NULL, new_body;
stmt = as_a <gomp_sections *> (gsi_stmt (*gsi_p));
push_gimplify_context ();
dlist = NULL;
ilist = NULL;
tree rclauses
= omp_task_reductions_find_first (gimple_omp_sections_clauses (stmt),
OMP_SECTIONS, OMP_CLAUSE_REDUCTION);
tree rtmp = NULL_TREE;
if (rclauses)
{
tree type = build_pointer_type (pointer_sized_int_node);
tree temp = create_tmp_var (type);
tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__REDUCTEMP_);
OMP_CLAUSE_DECL (c) = temp;
OMP_CLAUSE_CHAIN (c) = gimple_omp_sections_clauses (stmt);
gimple_omp_sections_set_clauses (stmt, c);
lower_omp_task_reductions (ctx, OMP_SECTIONS,
gimple_omp_sections_clauses (stmt),
&ilist, &tred_dlist);
rclauses = c;
rtmp = make_ssa_name (type);
gimple_seq_add_stmt (&ilist, gimple_build_assign (rtmp, temp));
}
tree *clauses_ptr = gimple_omp_sections_clauses_ptr (stmt);
lower_lastprivate_conditional_clauses (clauses_ptr, ctx);
lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
&ilist, &dlist, ctx, NULL);
control = create_tmp_var (unsigned_type_node, ".section");
gimple_omp_sections_set_control (stmt, control);
new_body = gimple_omp_body (stmt);
gimple_omp_set_body (stmt, NULL);
tgsi = gsi_start (new_body);
for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
{
omp_context *sctx;
gimple *sec_start;
sec_start = gsi_stmt (tgsi);
sctx = maybe_lookup_ctx (sec_start);
gcc_assert (sctx);
lower_omp (gimple_omp_body_ptr (sec_start), sctx);
gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
GSI_CONTINUE_LINKING);
gimple_omp_set_body (sec_start, NULL);
if (gsi_one_before_end_p (tgsi))
{
gimple_seq l = NULL;
lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
&ilist, &l, &clist, ctx);
gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
gimple_omp_section_set_last (sec_start);
}
gsi_insert_after (&tgsi, gimple_build_omp_return (false),
GSI_CONTINUE_LINKING);
}
block = make_node (BLOCK);
bind = gimple_build_bind (NULL, new_body, block);
olist = NULL;
lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist,
&clist, ctx);
if (clist)
{
tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
gcall *g = gimple_build_call (fndecl, 0);
gimple_seq_add_stmt (&olist, g);
gimple_seq_add_seq (&olist, clist);
fndecl = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
g = gimple_build_call (fndecl, 0);
gimple_seq_add_stmt (&olist, g);
}
block = make_node (BLOCK);
new_stmt = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, new_stmt, true);
pop_gimplify_context (new_stmt);
gimple_bind_append_vars (new_stmt, ctx->block_vars);
BLOCK_VARS (block) = gimple_bind_vars (bind);
if (BLOCK_VARS (block))
TREE_USED (block) = 1;
new_body = NULL;
gimple_seq_add_seq (&new_body, ilist);
gimple_seq_add_stmt (&new_body, stmt);
gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
gimple_seq_add_stmt (&new_body, bind);
t = gimple_build_omp_continue (control, control);
gimple_seq_add_stmt (&new_body, t);
gimple_seq_add_seq (&new_body, olist);
if (ctx->cancellable)
gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
gimple_seq_add_seq (&new_body, dlist);
new_body = maybe_catch_exception (new_body);
bool nowait = omp_find_clause (gimple_omp_sections_clauses (stmt),
OMP_CLAUSE_NOWAIT) != NULL_TREE;
t = gimple_build_omp_return (nowait);
gimple_seq_add_stmt (&new_body, t);
gimple_seq_add_seq (&new_body, tred_dlist);
maybe_add_implicit_barrier_cancel (ctx, t, &new_body);
if (rclauses)
OMP_CLAUSE_DECL (rclauses) = rtmp;
gimple_bind_set_body (new_stmt, new_body);
}
/* A subroutine of lower_omp_single. Expand the simple form of
a GIMPLE_OMP_SINGLE, without a copyprivate clause:
if (GOMP_single_start ())
BODY;
[ GOMP_barrier (); ] -> unless 'nowait' is present.
FIXME. It may be better to delay expanding the logic of this until
pass_expand_omp. The expanded logic may make the job more difficult
for a synchronization analysis pass. */
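/* Documentation-only pseudo-GIMPLE for the sequence emitted below:

     lhs = GOMP_single_start ();
     if (lhs == true) goto tlabel; else goto flabel;
     tlabel:
       BODY;
     flabel:
*/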
static void
lower_omp_single_simple (gomp_single *single_stmt, gimple_seq *pre_p)
{
location_t loc = gimple_location (single_stmt);
tree tlabel = create_artificial_label (loc);
tree flabel = create_artificial_label (loc);
gimple *call, *cond;
tree lhs, decl;
decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)));
call = gimple_build_call (decl, 0);
gimple_call_set_lhs (call, lhs);
gimple_seq_add_stmt (pre_p, call);
cond = gimple_build_cond (EQ_EXPR, lhs,
fold_convert_loc (loc, TREE_TYPE (lhs),
boolean_true_node),
tlabel, flabel);
gimple_seq_add_stmt (pre_p, cond);
gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
}
/* A subroutine of lower_omp_single. Expand the simple form of
a GIMPLE_OMP_SINGLE, with a copyprivate clause:
#pragma omp single copyprivate (a, b, c)
Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
{
if ((copyout_p = GOMP_single_copy_start ()) == NULL)
{
BODY;
copyout.a = a;
copyout.b = b;
copyout.c = c;
GOMP_single_copy_end (&copyout);
}
else
{
a = copyout_p->a;
b = copyout_p->b;
c = copyout_p->c;
}
GOMP_barrier ();
}
FIXME. It may be better to delay expanding the logic of this until
pass_expand_omp. The expanded logic may make the job more difficult
for a synchronization analysis pass. */
static void
lower_omp_single_copy (gomp_single *single_stmt, gimple_seq *pre_p,
omp_context *ctx)
{
tree ptr_type, t, l0, l1, l2, bfn_decl;
gimple_seq copyin_seq;
location_t loc = gimple_location (single_stmt);
ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
ptr_type = build_pointer_type (ctx->record_type);
ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
l0 = create_artificial_label (loc);
l1 = create_artificial_label (loc);
l2 = create_artificial_label (loc);
bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
t = build_call_expr_loc (loc, bfn_decl, 0);
t = fold_convert_loc (loc, ptr_type, t);
gimplify_assign (ctx->receiver_decl, t, pre_p);
t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
build_int_cst (ptr_type, 0));
t = build3 (COND_EXPR, void_type_node, t,
build_and_jump (&l0), build_and_jump (&l1));
gimplify_and_add (t, pre_p);
gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
copyin_seq = NULL;
lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
&copyin_seq, ctx);
t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
t = build_call_expr_loc (loc, bfn_decl, 1, t);
gimplify_and_add (t, pre_p);
t = build_and_jump (&l2);
gimplify_and_add (t, pre_p);
gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
gimple_seq_add_seq (pre_p, copyin_seq);
gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
}
/* Expand code for an OpenMP single directive. */
static void
lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree block;
gomp_single *single_stmt = as_a <gomp_single *> (gsi_stmt (*gsi_p));
gbind *bind;
gimple_seq bind_body, bind_body_tail = NULL, dlist;
push_gimplify_context ();
block = make_node (BLOCK);
bind = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, bind, true);
bind_body = NULL;
dlist = NULL;
lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
&bind_body, &dlist, ctx, NULL);
lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
gimple_seq_add_stmt (&bind_body, single_stmt);
if (ctx->record_type)
lower_omp_single_copy (single_stmt, &bind_body, ctx);
else
lower_omp_single_simple (single_stmt, &bind_body);
gimple_omp_set_body (single_stmt, NULL);
gimple_seq_add_seq (&bind_body, dlist);
bind_body = maybe_catch_exception (bind_body);
bool nowait = omp_find_clause (gimple_omp_single_clauses (single_stmt),
OMP_CLAUSE_NOWAIT) != NULL_TREE;
gimple *g = gimple_build_omp_return (nowait);
gimple_seq_add_stmt (&bind_body_tail, g);
maybe_add_implicit_barrier_cancel (ctx, g, &bind_body_tail);
if (ctx->record_type)
{
gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
tree clobber = build_clobber (ctx->record_type);
gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
clobber), GSI_SAME_STMT);
}
gimple_seq_add_seq (&bind_body, bind_body_tail);
gimple_bind_set_body (bind, bind_body);
pop_gimplify_context (bind);
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = ctx->block_vars;
if (BLOCK_VARS (block))
TREE_USED (block) = 1;
}
/* Expand code for an OpenMP master directive. */
static void
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree block, lab = NULL, x, bfn_decl;
gimple *stmt = gsi_stmt (*gsi_p);
gbind *bind;
location_t loc = gimple_location (stmt);
gimple_seq tseq;
push_gimplify_context ();
block = make_node (BLOCK);
bind = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, bind, true);
gimple_bind_add_stmt (bind, stmt);
bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
x = build_call_expr_loc (loc, bfn_decl, 0);
x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
tseq = NULL;
gimplify_and_add (x, &tseq);
gimple_bind_add_seq (bind, tseq);
lower_omp (gimple_omp_body_ptr (stmt), ctx);
gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
gimple_bind_add_seq (bind, gimple_omp_body (stmt));
gimple_omp_set_body (stmt, NULL);
gimple_bind_add_stmt (bind, gimple_build_label (lab));
gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
pop_gimplify_context (bind);
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = ctx->block_vars;
}
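/* A sketch of the lowered form built above:

     #pragma omp master
       body;

   becomes

     if (omp_get_thread_num () == 0)
       body;
     lab:

   with no implied barrier, hence gimple_build_omp_return (true).  */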
/* Helper function for lower_omp_task_reductions.  For a specific PASS,
   find the current clause that should be processed, or return false
   if all have been processed already.  */
static inline bool
omp_task_reduction_iterate (int pass, enum tree_code code,
enum omp_clause_code ccode, tree *c, tree *decl,
tree *type, tree *next)
{
for (; *c; *c = omp_find_clause (OMP_CLAUSE_CHAIN (*c), ccode))
{
if (ccode == OMP_CLAUSE_REDUCTION
&& code != OMP_TASKLOOP
&& !OMP_CLAUSE_REDUCTION_TASK (*c))
continue;
*decl = OMP_CLAUSE_DECL (*c);
*type = TREE_TYPE (*decl);
if (TREE_CODE (*decl) == MEM_REF)
{
if (pass != 1)
continue;
}
else
{
if (omp_is_reference (*decl))
*type = TREE_TYPE (*type);
if (pass != (!TREE_CONSTANT (TYPE_SIZE_UNIT (*type))))
continue;
}
*next = omp_find_clause (OMP_CLAUSE_CHAIN (*c), ccode);
return true;
}
*decl = NULL_TREE;
*type = NULL_TREE;
*next = NULL_TREE;
return false;
}
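/* Thus pass 0 picks up reductions whose (possibly dereferenced) type has a
   constant size, while pass 1 picks up array sections (MEM_REF decls) and
   variably sized types, presumably so that the variably sized fields end
   up last in the record laid out by the caller.  */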
/* Lower task_reduction and reduction clauses (the latter, unless CODE is
   OMP_TASKGROUP, only with the task modifier).  Register the mapping of
   those in the START sequence; reduce them and unregister them in the END
   sequence.  */
static void
lower_omp_task_reductions (omp_context *ctx, enum tree_code code, tree clauses,
gimple_seq *start, gimple_seq *end)
{
enum omp_clause_code ccode
= (code == OMP_TASKGROUP
? OMP_CLAUSE_TASK_REDUCTION : OMP_CLAUSE_REDUCTION);
tree cancellable = NULL_TREE;
clauses = omp_task_reductions_find_first (clauses, code, ccode);
if (clauses == NULL_TREE)
return;
if (code == OMP_FOR || code == OMP_SECTIONS)
{
for (omp_context *outer = ctx->outer; outer; outer = outer->outer)
if (gimple_code (outer->stmt) == GIMPLE_OMP_PARALLEL
&& outer->cancellable)
{
cancellable = error_mark_node;
break;
}
else if (gimple_code (outer->stmt) != GIMPLE_OMP_TASKGROUP)
break;
}
tree record_type = lang_hooks.types.make_type (RECORD_TYPE);
tree *last = &TYPE_FIELDS (record_type);
unsigned cnt = 0;
if (cancellable)
{
tree field = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE,
ptr_type_node);
tree ifield = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE,
integer_type_node);
*last = field;
DECL_CHAIN (field) = ifield;
last = &DECL_CHAIN (ifield);
DECL_CONTEXT (field) = record_type;
if (TYPE_ALIGN (record_type) < DECL_ALIGN (field))
SET_TYPE_ALIGN (record_type, DECL_ALIGN (field));
DECL_CONTEXT (ifield) = record_type;
if (TYPE_ALIGN (record_type) < DECL_ALIGN (ifield))
SET_TYPE_ALIGN (record_type, DECL_ALIGN (ifield));
}
for (int pass = 0; pass < 2; pass++)
{
tree decl, type, next;
for (tree c = clauses;
omp_task_reduction_iterate (pass, code, ccode,
&c, &decl, &type, &next); c = next)
{
++cnt;
tree new_type = type;
if (ctx->outer)
new_type = remap_type (type, &ctx->outer->cb);
tree field
= build_decl (OMP_CLAUSE_LOCATION (c), FIELD_DECL,
DECL_P (decl) ? DECL_NAME (decl) : NULL_TREE,
new_type);
if (DECL_P (decl) && type == TREE_TYPE (decl))
{
SET_DECL_ALIGN (field, DECL_ALIGN (decl));
DECL_USER_ALIGN (field) = DECL_USER_ALIGN (decl);
TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (decl);
}
else
SET_DECL_ALIGN (field, TYPE_ALIGN (type));
DECL_CONTEXT (field) = record_type;
if (TYPE_ALIGN (record_type) < DECL_ALIGN (field))
SET_TYPE_ALIGN (record_type, DECL_ALIGN (field));
*last = field;
last = &DECL_CHAIN (field);
tree bfield
= build_decl (OMP_CLAUSE_LOCATION (c), FIELD_DECL, NULL_TREE,
boolean_type_node);
DECL_CONTEXT (bfield) = record_type;
if (TYPE_ALIGN (record_type) < DECL_ALIGN (bfield))
SET_TYPE_ALIGN (record_type, DECL_ALIGN (bfield));
*last = bfield;
last = &DECL_CHAIN (bfield);
}
}
*last = NULL_TREE;
layout_type (record_type);
/* Build up an array which is used to register all the reductions with the
   runtime and to deregister them at the end.  Format documented in
   libgomp/task.c.  */
tree atype = build_array_type_nelts (pointer_sized_int_node, 7 + cnt * 3);
tree avar = create_tmp_var_raw (atype);
gimple_add_tmp_var (avar);
TREE_ADDRESSABLE (avar) = 1;
tree r = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_zero_node,
NULL_TREE, NULL_TREE);
tree t = build_int_cst (pointer_sized_int_node, cnt);
gimple_seq_add_stmt (start, gimple_build_assign (r, t));
gimple_seq seq = NULL;
tree sz = fold_convert (pointer_sized_int_node,
TYPE_SIZE_UNIT (record_type));
int cachesz = 64;
sz = fold_build2 (PLUS_EXPR, pointer_sized_int_node, sz,
build_int_cst (pointer_sized_int_node, cachesz - 1));
sz = fold_build2 (BIT_AND_EXPR, pointer_sized_int_node, sz,
build_int_cst (pointer_sized_int_node, ~(cachesz - 1)));
ctx->task_reductions.create (1 + cnt);
ctx->task_reduction_map = new hash_map<tree, unsigned>;
ctx->task_reductions.quick_push (TREE_CODE (sz) == INTEGER_CST
? sz : NULL_TREE);
sz = force_gimple_operand (sz, &seq, true, NULL_TREE);
gimple_seq_add_seq (start, seq);
r = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_one_node,
NULL_TREE, NULL_TREE);
gimple_seq_add_stmt (start, gimple_build_assign (r, sz));
r = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_int (2),
NULL_TREE, NULL_TREE);
t = build_int_cst (pointer_sized_int_node,
MAX (TYPE_ALIGN_UNIT (record_type), (unsigned) cachesz));
gimple_seq_add_stmt (start, gimple_build_assign (r, t));
r = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_int (3),
NULL_TREE, NULL_TREE);
t = build_int_cst (pointer_sized_int_node, -1);
gimple_seq_add_stmt (start, gimple_build_assign (r, t));
r = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_int (4),
NULL_TREE, NULL_TREE);
t = build_int_cst (pointer_sized_int_node, 0);
gimple_seq_add_stmt (start, gimple_build_assign (r, t));
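/* To summarize the slots initialized so far (the authoritative format
   description lives in libgomp/task.c): avar[0] is the number of
   reductions, avar[1] the per-thread chunk size rounded up to a multiple
   of the cache line size, avar[2] the required alignment, avar[3] is -1
   and avar[4] is 0.  The loop below additionally stores, for each
   reduction, the address of the original variable in avar[7 + cnt * 3]
   and the byte offset of its field within record_type in
   avar[7 + cnt * 3 + 1].  */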
/* In END, build a loop that iterates from 0 up to omp_get_num_threads ()
   and, for each task reduction, checks a bool stored right after the
   private variable within that thread's chunk; if the bool is clear, the
   variable hasn't been initialized and thus isn't going to be reduced or
   destructed; otherwise reduce and destruct it.  */
tree idx = create_tmp_var (size_type_node);
gimple_seq_add_stmt (end, gimple_build_assign (idx, size_zero_node));
tree num_thr_sz = create_tmp_var (size_type_node);
tree lab1 = create_artificial_label (UNKNOWN_LOCATION);
tree lab2 = create_artificial_label (UNKNOWN_LOCATION);
tree lab3 = NULL_TREE;
gimple *g;
if (code == OMP_FOR || code == OMP_SECTIONS)
{
/* For worksharing constructs, perform this only in the master thread,
   with the exception of cancelled implicit barriers, where only the
   current thread is handled.  */
tree lab4 = create_artificial_label (UNKNOWN_LOCATION);
t = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
tree thr_num = create_tmp_var (integer_type_node);
g = gimple_build_call (t, 0);
gimple_call_set_lhs (g, thr_num);
gimple_seq_add_stmt (end, g);
if (cancellable)
{
tree c;
tree lab5 = create_artificial_label (UNKNOWN_LOCATION);
tree lab6 = create_artificial_label (UNKNOWN_LOCATION);
lab3 = create_artificial_label (UNKNOWN_LOCATION);
if (code == OMP_FOR)
c = gimple_omp_for_clauses (ctx->stmt);
else /* if (code == OMP_SECTIONS) */
c = gimple_omp_sections_clauses (ctx->stmt);
c = OMP_CLAUSE_DECL (omp_find_clause (c, OMP_CLAUSE__REDUCTEMP_));
cancellable = c;
g = gimple_build_cond (NE_EXPR, c, build_zero_cst (TREE_TYPE (c)),
lab5, lab6);
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_label (lab5));
g = gimple_build_assign (idx, NOP_EXPR, thr_num);
gimple_seq_add_stmt (end, g);
g = gimple_build_assign (num_thr_sz, PLUS_EXPR, idx,
build_one_cst (TREE_TYPE (idx)));
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_goto (lab3));
gimple_seq_add_stmt (end, gimple_build_label (lab6));
}
g = gimple_build_cond (NE_EXPR, thr_num, integer_zero_node, lab2, lab4);
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_label (lab4));
}
if (code != OMP_PARALLEL)
{
t = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
tree num_thr = create_tmp_var (integer_type_node);
g = gimple_build_call (t, 0);
gimple_call_set_lhs (g, num_thr);
gimple_seq_add_stmt (end, g);
g = gimple_build_assign (num_thr_sz, NOP_EXPR, num_thr);
gimple_seq_add_stmt (end, g);
if (cancellable)
gimple_seq_add_stmt (end, gimple_build_label (lab3));
}
else
{
tree c = omp_find_clause (gimple_omp_parallel_clauses (ctx->stmt),
OMP_CLAUSE__REDUCTEMP_);
t = fold_convert (pointer_sized_int_node, OMP_CLAUSE_DECL (c));
t = fold_convert (size_type_node, t);
gimplify_assign (num_thr_sz, t, end);
}
t = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_int (2),
NULL_TREE, NULL_TREE);
tree data = create_tmp_var (pointer_sized_int_node);
gimple_seq_add_stmt (end, gimple_build_assign (data, t));
gimple_seq_add_stmt (end, gimple_build_label (lab1));
tree ptr;
if (TREE_CODE (TYPE_SIZE_UNIT (record_type)) == INTEGER_CST)
ptr = create_tmp_var (build_pointer_type (record_type));
else
ptr = create_tmp_var (ptr_type_node);
gimple_seq_add_stmt (end, gimple_build_assign (ptr, NOP_EXPR, data));
tree field = TYPE_FIELDS (record_type);
cnt = 0;
if (cancellable)
field = DECL_CHAIN (DECL_CHAIN (field));
for (int pass = 0; pass < 2; pass++)
{
tree decl, type, next;
for (tree c = clauses;
omp_task_reduction_iterate (pass, code, ccode,
&c, &decl, &type, &next); c = next)
{
tree var = decl, ref;
if (TREE_CODE (decl) == MEM_REF)
{
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == POINTER_PLUS_EXPR)
var = TREE_OPERAND (var, 0);
tree v = var;
if (TREE_CODE (var) == ADDR_EXPR)
var = TREE_OPERAND (var, 0);
else if (TREE_CODE (var) == INDIRECT_REF)
var = TREE_OPERAND (var, 0);
tree orig_var = var;
if (is_variable_sized (var))
{
gcc_assert (DECL_HAS_VALUE_EXPR_P (var));
var = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (var) == INDIRECT_REF);
var = TREE_OPERAND (var, 0);
gcc_assert (DECL_P (var));
}
t = ref = maybe_lookup_decl_in_outer_ctx (var, ctx);
if (orig_var != var)
gcc_assert (TREE_CODE (v) == ADDR_EXPR);
else if (TREE_CODE (v) == ADDR_EXPR)
t = build_fold_addr_expr (t);
else if (TREE_CODE (v) == INDIRECT_REF)
t = build_fold_indirect_ref (t);
if (TREE_CODE (TREE_OPERAND (decl, 0)) == POINTER_PLUS_EXPR)
{
tree b = TREE_OPERAND (TREE_OPERAND (decl, 0), 1);
b = maybe_lookup_decl_in_outer_ctx (b, ctx);
t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, b);
}
if (!integer_zerop (TREE_OPERAND (decl, 1)))
t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
fold_convert (size_type_node,
TREE_OPERAND (decl, 1)));
}
else
{
t = ref = maybe_lookup_decl_in_outer_ctx (var, ctx);
if (!omp_is_reference (decl))
t = build_fold_addr_expr (t);
}
t = fold_convert (pointer_sized_int_node, t);
seq = NULL;
t = force_gimple_operand (t, &seq, true, NULL_TREE);
gimple_seq_add_seq (start, seq);
r = build4 (ARRAY_REF, pointer_sized_int_node, avar,
size_int (7 + cnt * 3), NULL_TREE, NULL_TREE);
gimple_seq_add_stmt (start, gimple_build_assign (r, t));
t = unshare_expr (byte_position (field));
t = fold_convert (pointer_sized_int_node, t);
ctx->task_reduction_map->put (c, cnt);
ctx->task_reductions.quick_push (TREE_CODE (t) == INTEGER_CST
? t : NULL_TREE);
seq = NULL;
t = force_gimple_operand (t, &seq, true, NULL_TREE);
gimple_seq_add_seq (start, seq);
r = build4 (ARRAY_REF, pointer_sized_int_node, avar,
size_int (7 + cnt * 3 + 1), NULL_TREE, NULL_TREE);
gimple_seq_add_stmt (start, gimple_build_assign (r, t));
tree bfield = DECL_CHAIN (field);
tree cond;
if (code == OMP_PARALLEL || code == OMP_FOR || code == OMP_SECTIONS)
/* In parallel or worksharing constructs, all threads unconditionally
   initialize all their task reduction private variables.  */
cond = boolean_true_node;
else if (TREE_TYPE (ptr) == ptr_type_node)
{
cond = build2 (POINTER_PLUS_EXPR, ptr_type_node, ptr,
unshare_expr (byte_position (bfield)));
seq = NULL;
cond = force_gimple_operand (cond, &seq, true, NULL_TREE);
gimple_seq_add_seq (end, seq);
tree pbool = build_pointer_type (TREE_TYPE (bfield));
cond = build2 (MEM_REF, TREE_TYPE (bfield), cond,
build_int_cst (pbool, 0));
}
else
cond = build3 (COMPONENT_REF, TREE_TYPE (bfield),
build_simple_mem_ref (ptr), bfield, NULL_TREE);
tree lab3 = create_artificial_label (UNKNOWN_LOCATION);
tree lab4 = create_artificial_label (UNKNOWN_LOCATION);
tree condv = create_tmp_var (boolean_type_node);
gimple_seq_add_stmt (end, gimple_build_assign (condv, cond));
g = gimple_build_cond (NE_EXPR, condv, boolean_false_node,
lab3, lab4);
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_label (lab3));
if (cancellable && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) == NULL_TREE)
{
/* If this reduction doesn't need destruction and parallel
has been cancelled, there is nothing to do for this
reduction, so jump around the merge operation. */
tree lab5 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (NE_EXPR, cancellable,
build_zero_cst (TREE_TYPE (cancellable)),
lab4, lab5);
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_label (lab5));
}
tree new_var;
if (TREE_TYPE (ptr) == ptr_type_node)
{
new_var = build2 (POINTER_PLUS_EXPR, ptr_type_node, ptr,
unshare_expr (byte_position (field)));
seq = NULL;
new_var = force_gimple_operand (new_var, &seq, true, NULL_TREE);
gimple_seq_add_seq (end, seq);
tree pbool = build_pointer_type (TREE_TYPE (field));
new_var = build2 (MEM_REF, TREE_TYPE (field), new_var,
build_int_cst (pbool, 0));
}
else
new_var = build3 (COMPONENT_REF, TREE_TYPE (field),
build_simple_mem_ref (ptr), field, NULL_TREE);
enum tree_code rcode = OMP_CLAUSE_REDUCTION_CODE (c);
if (TREE_CODE (decl) != MEM_REF && omp_is_reference (decl))
ref = build_simple_mem_ref (ref);
/* reduction(-:var) sums up the partial results, so it acts
identically to reduction(+:var). */
if (rcode == MINUS_EXPR)
rcode = PLUS_EXPR;
if (TREE_CODE (decl) == MEM_REF)
{
tree type = TREE_TYPE (new_var);
tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
tree i = create_tmp_var (TREE_TYPE (v));
tree ptype = build_pointer_type (TREE_TYPE (type));
if (DECL_P (v))
{
v = maybe_lookup_decl_in_outer_ctx (v, ctx);
tree vv = create_tmp_var (TREE_TYPE (v));
gimplify_assign (vv, v, start);
v = vv;
}
ref = build4 (ARRAY_REF, pointer_sized_int_node, avar,
size_int (7 + cnt * 3), NULL_TREE, NULL_TREE);
new_var = build_fold_addr_expr (new_var);
new_var = fold_convert (ptype, new_var);
ref = fold_convert (ptype, ref);
tree m = create_tmp_var (ptype);
gimplify_assign (m, new_var, end);
new_var = m;
m = create_tmp_var (ptype);
gimplify_assign (m, ref, end);
ref = m;
gimplify_assign (i, build_int_cst (TREE_TYPE (v), 0), end);
tree body = create_artificial_label (UNKNOWN_LOCATION);
tree endl = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (end, gimple_build_label (body));
tree priv = build_simple_mem_ref (new_var);
tree out = build_simple_mem_ref (ref);
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
tree decl_placeholder
= OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c);
tree lab6 = NULL_TREE;
if (cancellable)
{
/* If this reduction needs destruction and parallel
has been cancelled, jump around the merge operation
to the destruction. */
tree lab5 = create_artificial_label (UNKNOWN_LOCATION);
lab6 = create_artificial_label (UNKNOWN_LOCATION);
tree zero = build_zero_cst (TREE_TYPE (cancellable));
g = gimple_build_cond (NE_EXPR, cancellable, zero,
lab6, lab5);
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_label (lab5));
}
SET_DECL_VALUE_EXPR (placeholder, out);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
SET_DECL_VALUE_EXPR (decl_placeholder, priv);
DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 1;
lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
gimple_seq_add_seq (end,
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
{
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = NULL;
}
if (cancellable)
gimple_seq_add_stmt (end, gimple_build_label (lab6));
tree x = lang_hooks.decls.omp_clause_dtor (c, priv);
if (x)
{
gimple_seq tseq = NULL;
gimplify_stmt (&x, &tseq);
gimple_seq_add_seq (end, tseq);
}
}
else
{
tree x = build2 (rcode, TREE_TYPE (out), out, priv);
out = unshare_expr (out);
gimplify_assign (out, x, end);
}
gimple *g
= gimple_build_assign (new_var, POINTER_PLUS_EXPR, new_var,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (end, g);
g = gimple_build_assign (ref, POINTER_PLUS_EXPR, ref,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (end, g);
g = gimple_build_assign (i, PLUS_EXPR, i,
build_int_cst (TREE_TYPE (i), 1));
gimple_seq_add_stmt (end, g);
g = gimple_build_cond (LE_EXPR, i, v, body, endl);
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_label (endl));
}
else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
tree oldv = NULL_TREE;
tree lab6 = NULL_TREE;
if (cancellable)
{
/* If this reduction needs destruction and parallel
has been cancelled, jump around the merge operation
to the destruction. */
tree lab5 = create_artificial_label (UNKNOWN_LOCATION);
lab6 = create_artificial_label (UNKNOWN_LOCATION);
tree zero = build_zero_cst (TREE_TYPE (cancellable));
g = gimple_build_cond (NE_EXPR, cancellable, zero,
lab6, lab5);
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_label (lab5));
}
if (omp_is_reference (decl)
&& !useless_type_conversion_p (TREE_TYPE (placeholder),
TREE_TYPE (ref)))
ref = build_fold_addr_expr_loc (OMP_CLAUSE_LOCATION (c), ref);
ref = build_fold_addr_expr_loc (OMP_CLAUSE_LOCATION (c), ref);
tree refv = create_tmp_var (TREE_TYPE (ref));
gimplify_assign (refv, ref, end);
ref = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), refv);
SET_DECL_VALUE_EXPR (placeholder, ref);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
tree d = maybe_lookup_decl (decl, ctx);
gcc_assert (d);
if (DECL_HAS_VALUE_EXPR_P (d))
oldv = DECL_VALUE_EXPR (d);
if (omp_is_reference (var))
{
tree v = fold_convert (TREE_TYPE (d),
build_fold_addr_expr (new_var));
SET_DECL_VALUE_EXPR (d, v);
}
else
SET_DECL_VALUE_EXPR (d, new_var);
DECL_HAS_VALUE_EXPR_P (d) = 1;
lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
if (oldv)
SET_DECL_VALUE_EXPR (d, oldv);
else
{
SET_DECL_VALUE_EXPR (d, NULL_TREE);
DECL_HAS_VALUE_EXPR_P (d) = 0;
}
gimple_seq_add_seq (end, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
if (cancellable)
gimple_seq_add_stmt (end, gimple_build_label (lab6));
tree x = lang_hooks.decls.omp_clause_dtor (c, new_var);
if (x)
{
gimple_seq tseq = NULL;
gimplify_stmt (&x, &tseq);
gimple_seq_add_seq (end, tseq);
}
}
else
{
tree x = build2 (rcode, TREE_TYPE (ref), ref, new_var);
ref = unshare_expr (ref);
gimplify_assign (ref, x, end);
}
gimple_seq_add_stmt (end, gimple_build_label (lab4));
++cnt;
field = DECL_CHAIN (bfield);
}
}
if (code == OMP_TASKGROUP)
{
t = builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_REDUCTION_REGISTER);
g = gimple_build_call (t, 1, build_fold_addr_expr (avar));
gimple_seq_add_stmt (start, g);
}
else
{
tree c;
if (code == OMP_FOR)
c = gimple_omp_for_clauses (ctx->stmt);
else if (code == OMP_SECTIONS)
c = gimple_omp_sections_clauses (ctx->stmt);
else
c = gimple_omp_taskreg_clauses (ctx->stmt);
c = omp_find_clause (c, OMP_CLAUSE__REDUCTEMP_);
t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (c)),
build_fold_addr_expr (avar));
gimplify_assign (OMP_CLAUSE_DECL (c), t, start);
}
gimple_seq_add_stmt (end, gimple_build_assign (data, PLUS_EXPR, data, sz));
gimple_seq_add_stmt (end, gimple_build_assign (idx, PLUS_EXPR, idx,
size_one_node));
g = gimple_build_cond (NE_EXPR, idx, num_thr_sz, lab1, lab2);
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_label (lab2));
if (code == OMP_FOR || code == OMP_SECTIONS)
{
enum built_in_function bfn
= BUILT_IN_GOMP_WORKSHARE_TASK_REDUCTION_UNREGISTER;
t = builtin_decl_explicit (bfn);
tree c_bool_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (t)));
tree arg;
if (cancellable)
{
arg = create_tmp_var (c_bool_type);
gimple_seq_add_stmt (end, gimple_build_assign (arg, NOP_EXPR,
cancellable));
}
else
arg = build_int_cst (c_bool_type, 0);
g = gimple_build_call (t, 1, arg);
}
else
{
t = builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_REDUCTION_UNREGISTER);
g = gimple_build_call (t, 1, build_fold_addr_expr (avar));
}
gimple_seq_add_stmt (end, g);
t = build_constructor (atype, NULL);
TREE_THIS_VOLATILE (t) = 1;
gimple_seq_add_stmt (end, gimple_build_assign (avar, t));
}
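/* Put together, the END sequence built above has roughly this shape
   (a sketch; `data' is seeded from avar[2], which the runtime seems to
   rewrite to the base of the allocated per-thread blocks):

     idx = 0;
     num_thr_sz = omp_get_num_threads ();   // or the _reductemp_ value
     data = avar[2];
   lab1:
     ptr = (struct record_type *) data;
     // per reduction: if the bool after the private var is set,
     // merge it into the original and run its dtor, if any
     data += sz; idx += 1;
     if (idx != num_thr_sz) goto lab1;
   lab2:
     ... GOMP_taskgroup_reduction_unregister (&avar) or the worksharing
         variant ...  */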
/* Expand code for an OpenMP taskgroup directive. */
static void
lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
gimple *stmt = gsi_stmt (*gsi_p);
gcall *x;
gbind *bind;
gimple_seq dseq = NULL;
tree block = make_node (BLOCK);
bind = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, bind, true);
gimple_bind_add_stmt (bind, stmt);
push_gimplify_context ();
x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
0);
gimple_bind_add_stmt (bind, x);
lower_omp_task_reductions (ctx, OMP_TASKGROUP,
gimple_omp_taskgroup_clauses (stmt),
gimple_bind_body_ptr (bind), &dseq);
lower_omp (gimple_omp_body_ptr (stmt), ctx);
gimple_bind_add_seq (bind, gimple_omp_body (stmt));
gimple_omp_set_body (stmt, NULL);
gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
gimple_bind_add_seq (bind, dseq);
pop_gimplify_context (bind);
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = ctx->block_vars;
}
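/* For instance (a sketch, not the exact gimple),

     int sum = 0;
     #pragma omp taskgroup task_reduction (+: sum)
     {
       #pragma omp task in_reduction (+: sum)
       sum += f ();
     }

   is lowered here to roughly

     GOMP_taskgroup_start ();
     GOMP_taskgroup_reduction_register (&avar);   // START seq from above
     ... lowered body ...
     ... GIMPLE_OMP_RETURN (later expanded to GOMP_taskgroup_end) ...
     ... END seq: merge the per-thread copies into sum and call
         GOMP_taskgroup_reduction_unregister (&avar) ...

   with avar being the array built by lower_omp_task_reductions; `f' is
   just a placeholder.  */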
/* Fold the OMP_ORDERED_CLAUSES for the OMP_ORDERED in STMT if possible. */
static void
lower_omp_ordered_clauses (gimple_stmt_iterator *gsi_p, gomp_ordered *ord_stmt,
omp_context *ctx)
{
struct omp_for_data fd;
if (!ctx->outer || gimple_code (ctx->outer->stmt) != GIMPLE_OMP_FOR)
return;
unsigned int len = gimple_omp_for_collapse (ctx->outer->stmt);
struct omp_for_data_loop *loops = XALLOCAVEC (struct omp_for_data_loop, len);
omp_extract_for_data (as_a <gomp_for *> (ctx->outer->stmt), &fd, loops);
if (!fd.ordered)
return;
tree *list_p = gimple_omp_ordered_clauses_ptr (ord_stmt);
tree c = gimple_omp_ordered_clauses (ord_stmt);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
&& OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
{
/* Merge depend clauses from multiple adjacent
#pragma omp ordered depend(sink:...) constructs
into one #pragma omp ordered depend(sink:...), so that
we can optimize them together. */
gimple_stmt_iterator gsi = *gsi_p;
gsi_next (&gsi);
while (!gsi_end_p (gsi))
{
gimple *stmt = gsi_stmt (gsi);
if (is_gimple_debug (stmt)
|| gimple_code (stmt) == GIMPLE_NOP)
{
gsi_next (&gsi);
continue;
}
if (gimple_code (stmt) != GIMPLE_OMP_ORDERED)
break;
gomp_ordered *ord_stmt2 = as_a <gomp_ordered *> (stmt);
c = gimple_omp_ordered_clauses (ord_stmt2);
if (c == NULL_TREE
|| OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
|| OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_SINK)
break;
while (*list_p)
list_p = &OMP_CLAUSE_CHAIN (*list_p);
*list_p = c;
gsi_remove (&gsi, true);
}
}
/* Canonicalize sink dependence clauses into one folded clause if
possible.
The basic algorithm is to create a sink vector whose first
element is the GCD of all the first elements, and whose remaining
elements are the minimum of the subsequent columns.
We ignore dependence vectors whose first element is zero because
such dependencies are known to be executed by the same thread.
We take into account the direction of the loop, so a minimum
becomes a maximum if the loop is iterating forwards. We also
ignore sink clauses where the loop direction is unknown, or where
the offsets are clearly invalid because they are not a multiple
of the loop increment.
For example:
#pragma omp for ordered(2)
for (i=0; i < N; ++i)
for (j=0; j < M; ++j)
{
#pragma omp ordered \
depend(sink:i-8,j-2) \
depend(sink:i,j-1) \ // Completely ignored because i+0.
depend(sink:i-4,j-3) \
depend(sink:i-6,j-4)
#pragma omp ordered depend(source)
}
Folded clause is:
depend(sink:-gcd(8,4,6),-min(2,3,4))
-or-
depend(sink:-2,-2)
*/
/* FIXME: Computing GCD's where the first element is zero is
non-trivial in the presence of collapsed loops. Do this later. */
if (fd.collapse > 1)
return;
wide_int *folded_deps = XALLOCAVEC (wide_int, 2 * len - 1);
/* wide_int is not a POD so it must be default-constructed. */
for (unsigned i = 0; i != 2 * len - 1; ++i)
new (static_cast<void*>(folded_deps + i)) wide_int ();
tree folded_dep = NULL_TREE;
/* TRUE if the first dimension's offset is negative. */
bool neg_offset_p = false;
list_p = gimple_omp_ordered_clauses_ptr (ord_stmt);
unsigned int i;
while ((c = *list_p) != NULL)
{
bool remove = false;
gcc_assert (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND);
if (OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_SINK)
goto next_ordered_clause;
tree vec;
for (vec = OMP_CLAUSE_DECL (c), i = 0;
vec && TREE_CODE (vec) == TREE_LIST;
vec = TREE_CHAIN (vec), ++i)
{
gcc_assert (i < len);
/* omp_extract_for_data has canonicalized the condition. */
gcc_assert (fd.loops[i].cond_code == LT_EXPR
|| fd.loops[i].cond_code == GT_EXPR);
bool forward = fd.loops[i].cond_code == LT_EXPR;
bool maybe_lexically_later = true;
/* While the committee makes up its mind, bail if we have any
non-constant steps. */
if (TREE_CODE (fd.loops[i].step) != INTEGER_CST)
goto lower_omp_ordered_ret;
tree itype = TREE_TYPE (TREE_VALUE (vec));
if (POINTER_TYPE_P (itype))
itype = sizetype;
wide_int offset = wide_int::from (wi::to_wide (TREE_PURPOSE (vec)),
TYPE_PRECISION (itype),
TYPE_SIGN (itype));
/* Ignore invalid offsets that are not multiples of the step. */
if (!wi::multiple_of_p (wi::abs (offset),
wi::abs (wi::to_wide (fd.loops[i].step)),
UNSIGNED))
{
warning_at (OMP_CLAUSE_LOCATION (c), 0,
"ignoring sink clause with offset that is not "
"a multiple of the loop step");
remove = true;
goto next_ordered_clause;
}
/* Calculate the first dimension. The first dimension of
the folded dependency vector is the GCD of the first
elements, while ignoring any first elements whose offset
is 0. */
if (i == 0)
{
/* Ignore dependence vectors whose first dimension is 0. */
if (offset == 0)
{
remove = true;
goto next_ordered_clause;
}
else
{
if (!TYPE_UNSIGNED (itype) && (forward ^ wi::neg_p (offset)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"first offset must be in opposite direction "
"of loop iterations");
goto lower_omp_ordered_ret;
}
if (forward)
offset = -offset;
neg_offset_p = forward;
/* Initialize the first time around. */
if (folded_dep == NULL_TREE)
{
folded_dep = c;
folded_deps[0] = offset;
}
else
folded_deps[0] = wi::gcd (folded_deps[0],
offset, UNSIGNED);
}
}
/* Calculate minimum for the remaining dimensions. */
else
{
folded_deps[len + i - 1] = offset;
if (folded_dep == c)
folded_deps[i] = offset;
else if (maybe_lexically_later
&& !wi::eq_p (folded_deps[i], offset))
{
if (forward ^ wi::gts_p (folded_deps[i], offset))
{
unsigned int j;
folded_dep = c;
for (j = 1; j <= i; j++)
folded_deps[j] = folded_deps[len + j - 1];
}
else
maybe_lexically_later = false;
}
}
}
gcc_assert (i == len);
remove = true;
next_ordered_clause:
if (remove)
*list_p = OMP_CLAUSE_CHAIN (c);
else
list_p = &OMP_CLAUSE_CHAIN (c);
}
if (folded_dep)
{
if (neg_offset_p)
folded_deps[0] = -folded_deps[0];
tree itype = TREE_TYPE (TREE_VALUE (OMP_CLAUSE_DECL (folded_dep)));
if (POINTER_TYPE_P (itype))
itype = sizetype;
TREE_PURPOSE (OMP_CLAUSE_DECL (folded_dep))
= wide_int_to_tree (itype, folded_deps[0]);
OMP_CLAUSE_CHAIN (folded_dep) = gimple_omp_ordered_clauses (ord_stmt);
*gimple_omp_ordered_clauses_ptr (ord_stmt) = folded_dep;
}
lower_omp_ordered_ret:
/* An ordered construct without clauses is equivalent to
   #pragma omp ordered threads, while we want a nop instead if we remove
   all clauses.  */
if (gimple_omp_ordered_clauses (ord_stmt) == NULL_TREE)
gsi_replace (gsi_p, gimple_build_nop (), true);
}
/* Expand code for an OpenMP ordered directive. */
static void
lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree block;
gimple *stmt = gsi_stmt (*gsi_p), *g;
gomp_ordered *ord_stmt = as_a <gomp_ordered *> (stmt);
gcall *x;
gbind *bind;
bool simd = omp_find_clause (gimple_omp_ordered_clauses (ord_stmt),
OMP_CLAUSE_SIMD);
/* FIXME: this should check presence of OMP_CLAUSE__SIMT_ on the enclosing
loop. */
bool maybe_simt
= simd && omp_maybe_offloaded_ctx (ctx) && omp_max_simt_vf () > 1;
bool threads = omp_find_clause (gimple_omp_ordered_clauses (ord_stmt),
OMP_CLAUSE_THREADS);
if (omp_find_clause (gimple_omp_ordered_clauses (ord_stmt),
OMP_CLAUSE_DEPEND))
{
/* FIXME: This needs to be moved to the expansion to verify various
   conditions only testable on a cfg with dominators computed, and also
   all the depend clauses to be merged still might need to be available
   for the runtime checks.  */
if (0)
lower_omp_ordered_clauses (gsi_p, ord_stmt, ctx);
return;
}
push_gimplify_context ();
block = make_node (BLOCK);
bind = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, bind, true);
gimple_bind_add_stmt (bind, stmt);
if (simd)
{
x = gimple_build_call_internal (IFN_GOMP_SIMD_ORDERED_START, 1,
build_int_cst (NULL_TREE, threads));
cfun->has_simduid_loops = true;
}
else
x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
0);
gimple_bind_add_stmt (bind, x);
tree counter = NULL_TREE, test = NULL_TREE, body = NULL_TREE;
if (maybe_simt)
{
counter = create_tmp_var (integer_type_node);
g = gimple_build_call_internal (IFN_GOMP_SIMT_LANE, 0);
gimple_call_set_lhs (g, counter);
gimple_bind_add_stmt (bind, g);
body = create_artificial_label (UNKNOWN_LOCATION);
test = create_artificial_label (UNKNOWN_LOCATION);
gimple_bind_add_stmt (bind, gimple_build_label (body));
tree simt_pred = create_tmp_var (integer_type_node);
g = gimple_build_call_internal (IFN_GOMP_SIMT_ORDERED_PRED, 1, counter);
gimple_call_set_lhs (g, simt_pred);
gimple_bind_add_stmt (bind, g);
tree t = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (EQ_EXPR, simt_pred, integer_zero_node, t, test);
gimple_bind_add_stmt (bind, g);
gimple_bind_add_stmt (bind, gimple_build_label (t));
}
lower_omp (gimple_omp_body_ptr (stmt), ctx);
gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
gimple_bind_add_seq (bind, gimple_omp_body (stmt));
gimple_omp_set_body (stmt, NULL);
if (maybe_simt)
{
gimple_bind_add_stmt (bind, gimple_build_label (test));
g = gimple_build_assign (counter, MINUS_EXPR, counter, integer_one_node);
gimple_bind_add_stmt (bind, g);
tree c = build2 (GE_EXPR, boolean_type_node, counter, integer_zero_node);
tree nonneg = create_tmp_var (integer_type_node);
gimple_seq tseq = NULL;
gimplify_assign (nonneg, fold_convert (integer_type_node, c), &tseq);
gimple_bind_add_seq (bind, tseq);
g = gimple_build_call_internal (IFN_GOMP_SIMT_VOTE_ANY, 1, nonneg);
gimple_call_set_lhs (g, nonneg);
gimple_bind_add_stmt (bind, g);
tree end = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (NE_EXPR, nonneg, integer_zero_node, body, end);
gimple_bind_add_stmt (bind, g);
gimple_bind_add_stmt (bind, gimple_build_label (end));
}
if (simd)
x = gimple_build_call_internal (IFN_GOMP_SIMD_ORDERED_END, 1,
build_int_cst (NULL_TREE, threads));
else
x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END),
0);
gimple_bind_add_stmt (bind, x);
gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
pop_gimplify_context (bind);
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = gimple_bind_vars (bind);
}
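/* A sketch of the common (non-simd, non-depend) case:

     #pragma omp ordered
       body;

   becomes

     GOMP_ordered_start ();
     body;
     GOMP_ordered_end ();

   whereas for simd the internal IFN_GOMP_SIMD_ORDERED_{START,END} calls
   are emitted instead (and has_simduid_loops is set above so that later
   passes know to look for them).  */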
/* Expand code for an OpenMP scan directive and the structured block
before the scan directive. */
static void
lower_omp_scan (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
gimple *stmt = gsi_stmt (*gsi_p);
bool has_clauses
= gimple_omp_scan_clauses (as_a <gomp_scan *> (stmt)) != NULL;
tree lane = NULL_TREE;
gimple_seq before = NULL;
omp_context *octx = ctx->outer;
gcc_assert (octx);
if (octx->scan_exclusive && !has_clauses)
{
gimple_stmt_iterator gsi2 = *gsi_p;
gsi_next (&gsi2);
gimple *stmt2 = gsi_stmt (gsi2);
/* For exclusive scan, swap GIMPLE_OMP_SCAN without clauses
   with the following GIMPLE_OMP_SCAN with clauses, so that the input
   phase, the one with the exclusive clause(s), comes first.  */
if (stmt2
&& gimple_code (stmt2) == GIMPLE_OMP_SCAN
&& gimple_omp_scan_clauses (as_a <gomp_scan *> (stmt2)) != NULL)
{
gsi_remove (gsi_p, false);
gsi_insert_after (gsi_p, stmt, GSI_SAME_STMT);
ctx = maybe_lookup_ctx (stmt2);
gcc_assert (ctx);
lower_omp_scan (gsi_p, ctx);
return;
}
}
bool input_phase = has_clauses ^ octx->scan_inclusive;
bool is_simd = (gimple_code (octx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (octx->stmt) == GF_OMP_FOR_KIND_SIMD);
bool is_for = (gimple_code (octx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (octx->stmt) == GF_OMP_FOR_KIND_FOR
&& !gimple_omp_for_combined_p (octx->stmt));
bool is_for_simd = is_simd && gimple_omp_for_combined_into_p (octx->stmt);
if (is_for_simd && octx->for_simd_scan_phase)
is_simd = false;
if (is_simd)
if (tree c = omp_find_clause (gimple_omp_for_clauses (octx->stmt),
OMP_CLAUSE__SIMDUID_))
{
tree uid = OMP_CLAUSE__SIMDUID__DECL (c);
lane = create_tmp_var (unsigned_type_node);
tree t = build_int_cst (integer_type_node,
input_phase ? 1
: octx->scan_inclusive ? 2 : 3);
gimple *g
= gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 2, uid, t);
gimple_call_set_lhs (g, lane);
gimple_seq_add_stmt (&before, g);
}
if (is_simd || is_for)
{
for (tree c = gimple_omp_for_clauses (octx->stmt);
c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_INSCAN (c))
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
tree var = OMP_CLAUSE_DECL (c);
tree new_var = lookup_decl (var, octx);
tree val = new_var;
tree var2 = NULL_TREE;
tree var3 = NULL_TREE;
tree var4 = NULL_TREE;
tree lane0 = NULL_TREE;
tree new_vard = new_var;
if (omp_is_reference (var))
{
new_var = build_simple_mem_ref_loc (clause_loc, new_var);
val = new_var;
}
if (DECL_HAS_VALUE_EXPR_P (new_vard))
{
val = DECL_VALUE_EXPR (new_vard);
if (new_vard != new_var)
{
gcc_assert (TREE_CODE (val) == ADDR_EXPR);
val = TREE_OPERAND (val, 0);
}
if (TREE_CODE (val) == ARRAY_REF
&& VAR_P (TREE_OPERAND (val, 0)))
{
tree v = TREE_OPERAND (val, 0);
if (lookup_attribute ("omp simd array",
DECL_ATTRIBUTES (v)))
{
val = unshare_expr (val);
lane0 = TREE_OPERAND (val, 1);
TREE_OPERAND (val, 1) = lane;
var2 = lookup_decl (v, octx);
if (octx->scan_exclusive)
var4 = lookup_decl (var2, octx);
if (input_phase
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
var3 = maybe_lookup_decl (var4 ? var4 : var2, octx);
if (!input_phase)
{
var2 = build4 (ARRAY_REF, TREE_TYPE (val),
var2, lane, NULL_TREE, NULL_TREE);
TREE_THIS_NOTRAP (var2) = 1;
if (octx->scan_exclusive)
{
var4 = build4 (ARRAY_REF, TREE_TYPE (val),
var4, lane, NULL_TREE,
NULL_TREE);
TREE_THIS_NOTRAP (var4) = 1;
}
}
else
var2 = val;
}
}
gcc_assert (var2);
}
else
{
var2 = build_outer_var_ref (var, octx);
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
var3 = maybe_lookup_decl (new_vard, octx);
if (var3 == new_vard || var3 == NULL_TREE)
var3 = NULL_TREE;
else if (is_simd && octx->scan_exclusive && !input_phase)
{
var4 = maybe_lookup_decl (var3, octx);
if (var4 == var3 || var4 == NULL_TREE)
{
if (TREE_ADDRESSABLE (TREE_TYPE (new_var)))
{
var4 = var3;
var3 = NULL_TREE;
}
else
var4 = NULL_TREE;
}
}
}
if (is_simd
&& octx->scan_exclusive
&& !input_phase
&& var4 == NULL_TREE)
var4 = create_tmp_var (TREE_TYPE (val));
}
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
if (input_phase)
{
if (var3)
{
/* If we've added a separate identity element
variable, copy it over into val. */
tree x = lang_hooks.decls.omp_clause_assign_op (c, val,
var3);
gimplify_and_add (x, &before);
}
else if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
/* Otherwise, assign to it the identity element. */
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
if (is_for)
tseq = copy_gimple_seq_and_replace_locals (tseq);
tree ref = build_outer_var_ref (var, octx);
tree x = (DECL_HAS_VALUE_EXPR_P (new_vard)
? DECL_VALUE_EXPR (new_vard) : NULL_TREE);
if (x)
{
if (new_vard != new_var)
val = build_fold_addr_expr_loc (clause_loc, val);
SET_DECL_VALUE_EXPR (new_vard, val);
}
SET_DECL_VALUE_EXPR (placeholder, ref);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
lower_omp (&tseq, octx);
if (x)
SET_DECL_VALUE_EXPR (new_vard, x);
SET_DECL_VALUE_EXPR (placeholder, NULL_TREE);
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
gimple_seq_add_seq (&before, tseq);
if (is_simd)
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
}
}
else if (is_simd)
{
tree x;
if (octx->scan_exclusive)
{
tree v4 = unshare_expr (var4);
tree v2 = unshare_expr (var2);
x = lang_hooks.decls.omp_clause_assign_op (c, v4, v2);
gimplify_and_add (x, &before);
}
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
x = (DECL_HAS_VALUE_EXPR_P (new_vard)
? DECL_VALUE_EXPR (new_vard) : NULL_TREE);
tree vexpr = val;
if (x && new_vard != new_var)
vexpr = build_fold_addr_expr_loc (clause_loc, val);
if (x)
SET_DECL_VALUE_EXPR (new_vard, vexpr);
SET_DECL_VALUE_EXPR (placeholder, var2);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
lower_omp (&tseq, octx);
gimple_seq_add_seq (&before, tseq);
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
if (x)
SET_DECL_VALUE_EXPR (new_vard, x);
SET_DECL_VALUE_EXPR (placeholder, NULL_TREE);
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
if (octx->scan_inclusive)
{
x = lang_hooks.decls.omp_clause_assign_op (c, val,
var2);
gimplify_and_add (x, &before);
}
else if (lane0 == NULL_TREE)
{
x = lang_hooks.decls.omp_clause_assign_op (c, val,
var4);
gimplify_and_add (x, &before);
}
}
}
else
{
if (input_phase)
{
/* input phase. Set val to initializer before
the body. */
tree x = omp_reduction_init (c, TREE_TYPE (new_var));
gimplify_assign (val, x, &before);
}
else if (is_simd)
{
/* scan phase. */
enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
if (code == MINUS_EXPR)
code = PLUS_EXPR;
tree x = build2 (code, TREE_TYPE (var2),
unshare_expr (var2), unshare_expr (val));
if (octx->scan_inclusive)
{
gimplify_assign (unshare_expr (var2), x, &before);
gimplify_assign (val, var2, &before);
}
else
{
gimplify_assign (unshare_expr (var4),
unshare_expr (var2), &before);
gimplify_assign (var2, x, &before);
if (lane0 == NULL_TREE)
gimplify_assign (val, var4, &before);
}
}
}
if (octx->scan_exclusive && !input_phase && lane0)
{
tree vexpr = unshare_expr (var4);
TREE_OPERAND (vexpr, 1) = lane0;
if (new_vard != new_var)
vexpr = build_fold_addr_expr_loc (clause_loc, vexpr);
SET_DECL_VALUE_EXPR (new_vard, vexpr);
}
}
}
if (is_simd && !is_for_simd)
{
gsi_insert_seq_after (gsi_p, gimple_omp_body (stmt), GSI_SAME_STMT);
gsi_insert_seq_after (gsi_p, before, GSI_SAME_STMT);
gsi_replace (gsi_p, gimple_build_nop (), true);
return;
}
lower_omp (gimple_omp_body_ptr (stmt), octx);
if (before)
{
gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (stmt));
gsi_insert_seq_before (&gsi, before, GSI_SAME_STMT);
}
}
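/* For reference, the source shape this handles looks like (a sketch):

     #pragma omp simd reduction (inscan, +: r)
     for (i = 0; i < n; i++)
       {
         r += a[i];                     // input phase
         #pragma omp scan inclusive (r)
         b[i] = r;                      // scan phase
       }

   where in the simd case the privatized copies live in "omp simd array"
   backed variables indexed by the lane computed above.  */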
/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case it
   requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.  */
static GTY(()) hash_map<tree, tree> *critical_name_mutexes;
static void
lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree block;
tree name, lock, unlock;
gomp_critical *stmt = as_a <gomp_critical *> (gsi_stmt (*gsi_p));
gbind *bind;
location_t loc = gimple_location (stmt);
gimple_seq tbody;
name = gimple_omp_critical_name (stmt);
if (name)
{
tree decl;
if (!critical_name_mutexes)
critical_name_mutexes = hash_map<tree, tree>::create_ggc (10);
tree *n = critical_name_mutexes->get (name);
if (n == NULL)
{
char *new_str;
decl = create_tmp_var_raw (ptr_type_node);
new_str = ACONCAT ((".gomp_critical_user_",
IDENTIFIER_POINTER (name), NULL));
DECL_NAME (decl) = get_identifier (new_str);
TREE_PUBLIC (decl) = 1;
TREE_STATIC (decl) = 1;
DECL_COMMON (decl) = 1;
DECL_ARTIFICIAL (decl) = 1;
DECL_IGNORED_P (decl) = 1;
varpool_node::finalize_decl (decl);
critical_name_mutexes->put (name, decl);
}
else
decl = *n;
/* If '#pragma omp critical' is inside an offloaded region or
   inside a function marked as offloadable, the symbol must be
   marked as offloadable too.  */
omp_context *octx;
if (cgraph_node::get (current_function_decl)->offloadable)
varpool_node::get_create (decl)->offloadable = 1;
else
for (octx = ctx->outer; octx; octx = octx->outer)
if (is_gimple_omp_offloaded (octx->stmt))
{
varpool_node::get_create (decl)->offloadable = 1;
break;
}
lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
lock = build_call_expr_loc (loc, lock, 1,
build_fold_addr_expr_loc (loc, decl));
unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
unlock = build_call_expr_loc (loc, unlock, 1,
build_fold_addr_expr_loc (loc, decl));
}
else
{
lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
lock = build_call_expr_loc (loc, lock, 0);
unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
unlock = build_call_expr_loc (loc, unlock, 0);
}
push_gimplify_context ();
block = make_node (BLOCK);
bind = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, bind, true);
gimple_bind_add_stmt (bind, stmt);
tbody = gimple_bind_body (bind);
gimplify_and_add (lock, &tbody);
gimple_bind_set_body (bind, tbody);
lower_omp (gimple_omp_body_ptr (stmt), ctx);
gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
gimple_bind_add_seq (bind, gimple_omp_body (stmt));
gimple_omp_set_body (stmt, NULL);
tbody = gimple_bind_body (bind);
gimplify_and_add (unlock, &tbody);
gimple_bind_set_body (bind, tbody);
gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
pop_gimplify_context (bind);
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = gimple_bind_vars (bind);
}
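/* A sketch of the two forms built above.  Unnamed:

     GOMP_critical_start ();
     body;
     GOMP_critical_end ();

   and named, e.g. for #pragma omp critical (foo):

     GOMP_critical_name_start (&.gomp_critical_user_foo);
     body;
     GOMP_critical_name_end (&.gomp_critical_user_foo);

   where .gomp_critical_user_foo is the common symbol created above, so
   that every translation unit using the same name agrees on one mutex.  */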
/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST; iterator initialization is appended to
   *BODY_P.  *CLIST is for lastprivate(conditional:) code that needs
   to be emitted in a critical section.  */
static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
gimple_seq *dlist, gimple_seq *clist,
struct omp_context *ctx)
{
tree clauses, cond, vinit;
enum tree_code cond_code;
gimple_seq stmts;
cond_code = fd->loop.cond_code;
cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
/* When possible, use a strict equality expression. This can let VRP
type optimizations deduce the value and remove a copy. */
if (tree_fits_shwi_p (fd->loop.step))
{
HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
if (step == 1 || step == -1)
cond_code = EQ_EXPR;
}
if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_GRID_LOOP
|| gimple_omp_for_grid_phony (fd->for_stmt))
cond = omp_grid_lastprivate_predicate (fd);
else
{
tree n2 = fd->loop.n2;
if (fd->collapse > 1
&& TREE_CODE (n2) != INTEGER_CST
&& gimple_omp_for_combined_into_p (fd->for_stmt))
{
struct omp_context *taskreg_ctx = NULL;
if (gimple_code (ctx->outer->stmt) == GIMPLE_OMP_FOR)
{
gomp_for *gfor = as_a <gomp_for *> (ctx->outer->stmt);
if (gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_FOR
|| gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_DISTRIBUTE)
{
if (gimple_omp_for_combined_into_p (gfor))
{
gcc_assert (ctx->outer->outer
&& is_parallel_ctx (ctx->outer->outer));
taskreg_ctx = ctx->outer->outer;
}
else
{
struct omp_for_data outer_fd;
omp_extract_for_data (gfor, &outer_fd, NULL);
n2 = fold_convert (TREE_TYPE (n2), outer_fd.loop.n2);
}
}
else if (gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_TASKLOOP)
taskreg_ctx = ctx->outer->outer;
}
else if (is_taskreg_ctx (ctx->outer))
taskreg_ctx = ctx->outer;
if (taskreg_ctx)
{
int i;
tree taskreg_clauses
= gimple_omp_taskreg_clauses (taskreg_ctx->stmt);
tree innerc = omp_find_clause (taskreg_clauses,
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
for (i = 0; i < fd->collapse; i++)
{
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
}
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
if (innerc)
n2 = fold_convert (TREE_TYPE (n2),
lookup_decl (OMP_CLAUSE_DECL (innerc),
taskreg_ctx));
}
}
cond = build2 (cond_code, boolean_type_node, fd->loop.v, n2);
}
clauses = gimple_omp_for_clauses (fd->for_stmt);
stmts = NULL;
lower_lastprivate_clauses (clauses, cond, body_p, &stmts, clist, ctx);
if (!gimple_seq_empty_p (stmts))
{
gimple_seq_add_seq (&stmts, *dlist);
*dlist = stmts;
/* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
vinit = fd->loop.n1;
if (cond_code == EQ_EXPR
&& tree_fits_shwi_p (fd->loop.n2)
&& ! integer_zerop (fd->loop.n2))
vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
else
vinit = unshare_expr (vinit);
/* Initialize the iterator variable, so that threads that don't execute
any iterations don't execute the lastprivate clauses by accident. */
gimplify_assign (fd->loop.v, vinit, body_p);
}
}
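/* For example (a sketch; x_orig and x_priv are just illustrative names),
   for

     #pragma omp for lastprivate (x)
     for (i = 0; i < n; i++)
       x = a[i];

   the unit step permits the EQ_EXPR form, so each thread finishes its
   chunk with roughly

     if (i == n)
       x_orig = x_priv;          // emitted by lower_lastprivate_clauses

   and threads that receive no iterations have i set from vinit first so
   the comparison cannot fire by accident.  */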
/* Callback for walk_gimple_seq. Find #pragma omp scan statement. */
static tree
omp_find_scan (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
struct walk_stmt_info *wi)
{
gimple *stmt = gsi_stmt (*gsi_p);
*handled_ops_p = true;
switch (gimple_code (stmt))
{
WALK_SUBSTMTS;
case GIMPLE_OMP_FOR:
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_SIMD
&& gimple_omp_for_combined_into_p (stmt))
*handled_ops_p = false;
break;
case GIMPLE_OMP_SCAN:
*(gimple_stmt_iterator *) (wi->info) = *gsi_p;
return integer_zero_node;
default:
break;
}
return NULL;
}
/* Helper function for lower_omp_for; add transformations for a worksharing
   loop with scan directives inside of it.
   For a worksharing loop not combined with simd, transform:
#pragma omp for reduction(inscan,+:r) private(i)
for (i = 0; i < n; i = i + 1)
{
{
update (r);
}
#pragma omp scan inclusive(r)
{
use (r);
}
}
into two worksharing loops + code to merge results:
num_threads = omp_get_num_threads ();
thread_num = omp_get_thread_num ();
if (thread_num == 0) goto <D.2099>; else goto <D.2100>;
<D.2099>:
var2 = r;
goto <D.2101>;
<D.2100>:
// For UDRs this is UDR init, or if ctors are needed, copy from
// var3 that has been constructed to contain the neutral element.
var2 = 0;
<D.2101>:
ivar = 0;
// The _scantemp_ clauses will arrange for rpriva to be initialized to
// a shared array with num_threads elements and rprivb to a local array
// with a number of elements equal to the number of (contiguous)
// iterations the current thread will perform.  controlb and controlp
// variables are temporaries to handle deallocation of rprivb at the end
// of the second GOMP_FOR.
#pragma omp for _scantemp_(rpriva) _scantemp_(rprivb) _scantemp_(controlb) \
_scantemp_(controlp) reduction(inscan,+:r) private(i) nowait
for (i = 0; i < n; i = i + 1)
{
{
// For UDRs this is UDR init or copy from var3.
r = 0;
// This is the input phase from user code.
update (r);
}
{
// For UDRs this is UDR merge.
var2 = var2 + r;
// Rather than handing it over to the user, save it to the local
// thread's array.
rprivb[ivar] = var2;
// For exclusive scan, the above two statements are swapped.
ivar = ivar + 1;
}
}
// And remember the final value from this thread in the shared
// rpriva array.
rpriva[(sizetype) thread_num] = var2;
// If more than one thread, compute the inclusive parallel scan of
// the rpriva array using a work-efficient prefix sum.
if (num_threads > 1) goto <D.2102>; else goto <D.2103>;
<D.2102>:
GOMP_barrier ();
down = 0;
k = 1;
num_threadsu = (unsigned int) num_threads;
thread_numup1 = (unsigned int) thread_num + 1;
<D.2108>:
twok = k << 1;
if (twok > num_threadsu) goto <D.2110>; else goto <D.2111>;
<D.2110>:
down = 4294967295;
k = k >> 1;
if (k == num_threadsu) goto <D.2112>; else goto <D.2111>;
<D.2112>:
k = k >> 1;
<D.2111>:
twok = k << 1;
cplx = .MUL_OVERFLOW (thread_numup1, twok);
mul = REALPART_EXPR <cplx>;
ovf = IMAGPART_EXPR <cplx>;
if (ovf == 0) goto <D.2116>; else goto <D.2117>;
<D.2116>:
andv = k & down;
andvm1 = andv + 4294967295;
l = mul + andvm1;
if (l < num_threadsu) goto <D.2120>; else goto <D.2117>;
<D.2120>:
// For UDRs this is UDR merge, performed using var2 variable as temporary,
// i.e. var2 = rpriva[l - k]; UDR merge (var2, rpriva[l]); rpriva[l] = var2;
rpriva[l] = rpriva[l - k] + rpriva[l];
<D.2117>:
if (down == 0) goto <D.2121>; else goto <D.2122>;
<D.2121>:
k = k << 1;
goto <D.2123>;
<D.2122>:
k = k >> 1;
<D.2123>:
GOMP_barrier ();
if (k != 0) goto <D.2108>; else goto <D.2103>;
<D.2103>:
if (thread_num == 0) goto <D.2124>; else goto <D.2125>;
<D.2124>:
// For UDRs this is UDR init or copy from var3.
var2 = 0;
goto <D.2126>;
<D.2125>:
var2 = rpriva[thread_num - 1];
<D.2126>:
ivar = 0;
#pragma omp for _scantemp_(controlb) _scantemp_(controlp) \
reduction(inscan,+:r) private(i)
for (i = 0; i < n; i = i + 1)
{
{
// For UDRs, this is r = var2; UDR merge (r, rprivb[ivar]);
r = var2 + rprivb[ivar];
}
{
// This is the scan phase from user code.
use (r);
// Plus a bump of the iterator.
ivar = ivar + 1;
}
} */
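/* A concrete (hypothetical) illustration of the merge step above: with
   num_threads == 4 and per-thread partial sums rpriva = {3, 1, 4, 2},
   the work-efficient scan rewrites rpriva in place to the inclusive
   prefix sums {3, 4, 8, 10}; thread 0 then restarts from the identity
   (var2 = 0) while thread t > 0 restarts from rpriva[t - 1], so the
   second loop computes the same values r would have taken serially.  */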
static void
lower_omp_for_scan (gimple_seq *body_p, gimple_seq *dlist, gomp_for *stmt,
struct omp_for_data *fd, omp_context *ctx)
{
bool is_for_simd = gimple_omp_for_combined_p (stmt);
gcc_assert (ctx->scan_inclusive || ctx->scan_exclusive);
gimple_seq body = gimple_omp_body (stmt);
gimple_stmt_iterator input1_gsi = gsi_none ();
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
wi.val_only = true;
wi.info = (void *) &input1_gsi;
walk_gimple_seq_mod (&body, omp_find_scan, NULL, &wi);
gcc_assert (!gsi_end_p (input1_gsi));
gimple *input_stmt1 = gsi_stmt (input1_gsi);
gimple_stmt_iterator gsi = input1_gsi;
gsi_next (&gsi);
gimple_stmt_iterator scan1_gsi = gsi;
gimple *scan_stmt1 = gsi_stmt (gsi);
gcc_assert (scan_stmt1 && gimple_code (scan_stmt1) == GIMPLE_OMP_SCAN);
gimple_seq input_body = gimple_omp_body (input_stmt1);
gimple_seq scan_body = gimple_omp_body (scan_stmt1);
gimple_omp_set_body (input_stmt1, NULL);
gimple_omp_set_body (scan_stmt1, NULL);
gimple_omp_set_body (stmt, NULL);
gomp_for *new_stmt = as_a <gomp_for *> (gimple_copy (stmt));
gimple_seq new_body = copy_gimple_seq_and_replace_locals (body);
gimple_omp_set_body (stmt, body);
gimple_omp_set_body (input_stmt1, input_body);
gimple_stmt_iterator input2_gsi = gsi_none ();
memset (&wi, 0, sizeof (wi));
wi.val_only = true;
wi.info = (void *) &input2_gsi;
walk_gimple_seq_mod (&new_body, omp_find_scan, NULL, &wi);
gcc_assert (!gsi_end_p (input2_gsi));
gimple *input_stmt2 = gsi_stmt (input2_gsi);
gsi = input2_gsi;
gsi_next (&gsi);
gimple_stmt_iterator scan2_gsi = gsi;
gimple *scan_stmt2 = gsi_stmt (gsi);
gcc_assert (scan_stmt2 && gimple_code (scan_stmt2) == GIMPLE_OMP_SCAN);
gimple_omp_set_body (scan_stmt2, scan_body);
gimple_stmt_iterator input3_gsi = gsi_none ();
gimple_stmt_iterator scan3_gsi = gsi_none ();
gimple_stmt_iterator input4_gsi = gsi_none ();
gimple_stmt_iterator scan4_gsi = gsi_none ();
gimple *input_stmt3 = NULL, *scan_stmt3 = NULL;
gimple *input_stmt4 = NULL, *scan_stmt4 = NULL;
omp_context *input_simd_ctx = NULL, *scan_simd_ctx = NULL;
if (is_for_simd)
{
memset (&wi, 0, sizeof (wi));
wi.val_only = true;
wi.info = (void *) &input3_gsi;
walk_gimple_seq_mod (&input_body, omp_find_scan, NULL, &wi);
gcc_assert (!gsi_end_p (input3_gsi));
input_stmt3 = gsi_stmt (input3_gsi);
gsi = input3_gsi;
gsi_next (&gsi);
scan3_gsi = gsi;
scan_stmt3 = gsi_stmt (gsi);
gcc_assert (scan_stmt3 && gimple_code (scan_stmt3) == GIMPLE_OMP_SCAN);
memset (&wi, 0, sizeof (wi));
wi.val_only = true;
wi.info = (void *) &input4_gsi;
walk_gimple_seq_mod (&scan_body, omp_find_scan, NULL, &wi);
gcc_assert (!gsi_end_p (input4_gsi));
input_stmt4 = gsi_stmt (input4_gsi);
gsi = input4_gsi;
gsi_next (&gsi);
scan4_gsi = gsi;
scan_stmt4 = gsi_stmt (gsi);
gcc_assert (scan_stmt4 && gimple_code (scan_stmt4) == GIMPLE_OMP_SCAN);
input_simd_ctx = maybe_lookup_ctx (input_stmt3)->outer;
scan_simd_ctx = maybe_lookup_ctx (input_stmt4)->outer;
}
tree num_threads = create_tmp_var (integer_type_node);
tree thread_num = create_tmp_var (integer_type_node);
tree nthreads_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
tree threadnum_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
gimple *g = gimple_build_call (nthreads_decl, 0);
gimple_call_set_lhs (g, num_threads);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_call (threadnum_decl, 0);
gimple_call_set_lhs (g, thread_num);
gimple_seq_add_stmt (body_p, g);
tree ivar = create_tmp_var (sizetype);
tree new_clauses1 = NULL_TREE, new_clauses2 = NULL_TREE;
tree *cp1 = &new_clauses1, *cp2 = &new_clauses2;
tree k = create_tmp_var (unsigned_type_node);
tree l = create_tmp_var (unsigned_type_node);
gimple_seq clist = NULL, mdlist = NULL;
gimple_seq thr01_list = NULL, thrn1_list = NULL;
gimple_seq thr02_list = NULL, thrn2_list = NULL;
gimple_seq scan1_list = NULL, input2_list = NULL;
gimple_seq last_list = NULL, reduc_list = NULL;
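  /* Mapping of the sequences above onto the pseudocode in the big comment
     (as far as can be told from the uses below): thr01_list/thrn1_list
     initialize var2 before the first loop for thread 0 resp. the other
     threads, thr02_list/thrn2_list do the same before the second loop,
     scan1_list is the merge + rprivb store in the first loop, input2_list
     the "r = var2 + rprivb[ivar]" part of the second one, last_list the
     final store back to the original, and reduc_list the rpriva[l] merge
     of the prefix-sum phase.  */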
for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_INSCAN (c))
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
tree var = OMP_CLAUSE_DECL (c);
tree new_var = lookup_decl (var, ctx);
tree var3 = NULL_TREE;
tree new_vard = new_var;
if (omp_is_reference (var))
new_var = build_simple_mem_ref_loc (clause_loc, new_var);
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
var3 = maybe_lookup_decl (new_vard, ctx);
if (var3 == new_vard)
var3 = NULL_TREE;
}
tree ptype = build_pointer_type (TREE_TYPE (new_var));
tree rpriva = create_tmp_var (ptype);
tree nc = build_omp_clause (clause_loc, OMP_CLAUSE__SCANTEMP_);
OMP_CLAUSE_DECL (nc) = rpriva;
*cp1 = nc;
cp1 = &OMP_CLAUSE_CHAIN (nc);
tree rprivb = create_tmp_var (ptype);
nc = build_omp_clause (clause_loc, OMP_CLAUSE__SCANTEMP_);
OMP_CLAUSE_DECL (nc) = rprivb;
OMP_CLAUSE__SCANTEMP__ALLOC (nc) = 1;
*cp1 = nc;
cp1 = &OMP_CLAUSE_CHAIN (nc);
tree var2 = create_tmp_var_raw (TREE_TYPE (new_var));
if (new_vard != new_var)
TREE_ADDRESSABLE (var2) = 1;
gimple_add_tmp_var (var2);
tree x = fold_convert_loc (clause_loc, sizetype, thread_num);
x = fold_build2_loc (clause_loc, MULT_EXPR, sizetype, x,
TYPE_SIZE_UNIT (TREE_TYPE (ptype)));
x = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (rpriva), rpriva, x);
tree rpriva_ref = build_simple_mem_ref_loc (clause_loc, x);
x = fold_build2_loc (clause_loc, PLUS_EXPR, integer_type_node,
thread_num, integer_minus_one_node);
x = fold_convert_loc (clause_loc, sizetype, x);
x = fold_build2_loc (clause_loc, MULT_EXPR, sizetype, x,
TYPE_SIZE_UNIT (TREE_TYPE (ptype)));
x = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (rpriva), rpriva, x);
tree rprivam1_ref = build_simple_mem_ref_loc (clause_loc, x);
x = fold_convert_loc (clause_loc, sizetype, l);
x = fold_build2_loc (clause_loc, MULT_EXPR, sizetype, x,
TYPE_SIZE_UNIT (TREE_TYPE (ptype)));
x = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (rpriva), rpriva, x);
tree rprival_ref = build_simple_mem_ref_loc (clause_loc, x);
x = fold_build2_loc (clause_loc, MINUS_EXPR, unsigned_type_node, l, k);
x = fold_convert_loc (clause_loc, sizetype, x);
x = fold_build2_loc (clause_loc, MULT_EXPR, sizetype, x,
TYPE_SIZE_UNIT (TREE_TYPE (ptype)));
x = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (rpriva), rpriva, x);
tree rprivalmk_ref = build_simple_mem_ref_loc (clause_loc, x);
x = fold_build2_loc (clause_loc, MULT_EXPR, sizetype, ivar,
TYPE_SIZE_UNIT (TREE_TYPE (ptype)));
x = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (rprivb), rprivb, x);
tree rprivb_ref = build_simple_mem_ref_loc (clause_loc, x);
tree var4 = is_for_simd ? new_var : var2;
tree var5 = NULL_TREE, var6 = NULL_TREE;
if (is_for_simd)
{
var5 = lookup_decl (var, input_simd_ctx);
var6 = lookup_decl (var, scan_simd_ctx);
if (new_vard != new_var)
{
var5 = build_simple_mem_ref_loc (clause_loc, var5);
var6 = build_simple_mem_ref_loc (clause_loc, var6);
}
}
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
tree val = var2;
x = lang_hooks.decls.omp_clause_default_ctor
(c, var2, build_outer_var_ref (var, ctx));
if (x)
gimplify_and_add (x, &clist);
x = build_outer_var_ref (var, ctx);
x = lang_hooks.decls.omp_clause_assign_op (c, unshare_expr (var4),
x);
gimplify_and_add (x, &thr01_list);
tree y = (DECL_HAS_VALUE_EXPR_P (new_vard)
? DECL_VALUE_EXPR (new_vard) : NULL_TREE);
if (var3)
{
x = unshare_expr (var4);
x = lang_hooks.decls.omp_clause_assign_op (c, x, var3);
gimplify_and_add (x, &thrn1_list);
x = unshare_expr (var4);
x = lang_hooks.decls.omp_clause_assign_op (c, x, var3);
gimplify_and_add (x, &thr02_list);
}
else if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
/* Otherwise, assign to it the identity element. */
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
tseq = copy_gimple_seq_and_replace_locals (tseq);
if (!is_for_simd)
{
if (new_vard != new_var)
val = build_fold_addr_expr_loc (clause_loc, val);
SET_DECL_VALUE_EXPR (new_vard, val);
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
}
SET_DECL_VALUE_EXPR (placeholder, error_mark_node);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
lower_omp (&tseq, ctx);
gimple_seq_add_seq (&thrn1_list, tseq);
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (&thr02_list, tseq);
SET_DECL_VALUE_EXPR (placeholder, NULL_TREE);
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
if (y)
SET_DECL_VALUE_EXPR (new_vard, y);
else
{
DECL_HAS_VALUE_EXPR_P (new_vard) = 0;
SET_DECL_VALUE_EXPR (new_vard, NULL_TREE);
}
}
x = unshare_expr (var4);
x = lang_hooks.decls.omp_clause_assign_op (c, x, rprivam1_ref);
gimplify_and_add (x, &thrn2_list);
if (is_for_simd)
{
x = unshare_expr (rprivb_ref);
x = lang_hooks.decls.omp_clause_assign_op (c, x, var5);
gimplify_and_add (x, &scan1_list);
}
else
{
if (ctx->scan_exclusive)
{
x = unshare_expr (rprivb_ref);
x = lang_hooks.decls.omp_clause_assign_op (c, x, var2);
gimplify_and_add (x, &scan1_list);
}
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
tseq = copy_gimple_seq_and_replace_locals (tseq);
SET_DECL_VALUE_EXPR (placeholder, var2);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
lower_omp (&tseq, ctx);
gimple_seq_add_seq (&scan1_list, tseq);
if (ctx->scan_inclusive)
{
x = unshare_expr (rprivb_ref);
x = lang_hooks.decls.omp_clause_assign_op (c, x, var2);
gimplify_and_add (x, &scan1_list);
}
}
x = unshare_expr (rpriva_ref);
x = lang_hooks.decls.omp_clause_assign_op (c, x,
unshare_expr (var4));
gimplify_and_add (x, &mdlist);
x = unshare_expr (is_for_simd ? var6 : new_var);
x = lang_hooks.decls.omp_clause_assign_op (c, x, var4);
gimplify_and_add (x, &input2_list);
val = rprivb_ref;
if (new_vard != new_var)
val = build_fold_addr_expr_loc (clause_loc, val);
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
tseq = copy_gimple_seq_and_replace_locals (tseq);
SET_DECL_VALUE_EXPR (new_vard, val);
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
if (is_for_simd)
{
SET_DECL_VALUE_EXPR (placeholder, var6);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
}
else
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
lower_omp (&tseq, ctx);
if (y)
SET_DECL_VALUE_EXPR (new_vard, y);
else
{
DECL_HAS_VALUE_EXPR_P (new_vard) = 0;
SET_DECL_VALUE_EXPR (new_vard, NULL_TREE);
}
if (!is_for_simd)
{
SET_DECL_VALUE_EXPR (placeholder, new_var);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
lower_omp (&tseq, ctx);
}
gimple_seq_add_seq (&input2_list, tseq);
x = build_outer_var_ref (var, ctx);
x = lang_hooks.decls.omp_clause_assign_op (c, x, rpriva_ref);
gimplify_and_add (x, &last_list);
x = lang_hooks.decls.omp_clause_assign_op (c, var2, rprivalmk_ref);
gimplify_and_add (x, &reduc_list);
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
tseq = copy_gimple_seq_and_replace_locals (tseq);
val = rprival_ref;
if (new_vard != new_var)
val = build_fold_addr_expr_loc (clause_loc, val);
SET_DECL_VALUE_EXPR (new_vard, val);
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
SET_DECL_VALUE_EXPR (placeholder, var2);
lower_omp (&tseq, ctx);
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
SET_DECL_VALUE_EXPR (placeholder, NULL_TREE);
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
if (y)
SET_DECL_VALUE_EXPR (new_vard, y);
else
{
DECL_HAS_VALUE_EXPR_P (new_vard) = 0;
SET_DECL_VALUE_EXPR (new_vard, NULL_TREE);
}
gimple_seq_add_seq (&reduc_list, tseq);
x = lang_hooks.decls.omp_clause_assign_op (c, rprival_ref, var2);
gimplify_and_add (x, &reduc_list);
x = lang_hooks.decls.omp_clause_dtor (c, var2);
if (x)
gimplify_and_add (x, dlist);
}
else
{
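/* No reduction placeholder: open-code the initialization and merges
using the reduction's tree code.  A MINUS reduction combines its
partial results with PLUS.  */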
x = build_outer_var_ref (var, ctx);
gimplify_assign (unshare_expr (var4), x, &thr01_list);
x = omp_reduction_init (c, TREE_TYPE (new_var));
gimplify_assign (unshare_expr (var4), unshare_expr (x),
&thrn1_list);
gimplify_assign (unshare_expr (var4), x, &thr02_list);
gimplify_assign (unshare_expr (var4), rprivam1_ref, &thrn2_list);
enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
if (code == MINUS_EXPR)
code = PLUS_EXPR;
if (is_for_simd)
gimplify_assign (unshare_expr (rprivb_ref), var5, &scan1_list);
else
{
if (ctx->scan_exclusive)
gimplify_assign (unshare_expr (rprivb_ref), var2,
&scan1_list);
x = build2 (code, TREE_TYPE (new_var), var2, new_var);
gimplify_assign (var2, x, &scan1_list);
if (ctx->scan_inclusive)
gimplify_assign (unshare_expr (rprivb_ref), var2,
&scan1_list);
}
gimplify_assign (unshare_expr (rpriva_ref), unshare_expr (var4),
&mdlist);
x = build2 (code, TREE_TYPE (new_var), var4, rprivb_ref);
gimplify_assign (is_for_simd ? var6 : new_var, x, &input2_list);
gimplify_assign (build_outer_var_ref (var, ctx), rpriva_ref,
&last_list);
x = build2 (code, TREE_TYPE (new_var), rprivalmk_ref,
unshare_expr (rprival_ref));
gimplify_assign (rprival_ref, x, &reduc_list);
}
}
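/* Advance the RPRIVB index once per iteration, both in the input
phase body (SCAN1_LIST) and in the scan phase body.  */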
g = gimple_build_assign (ivar, PLUS_EXPR, ivar, size_one_node);
gimple_seq_add_stmt (&scan1_list, g);
g = gimple_build_assign (ivar, PLUS_EXPR, ivar, size_one_node);
gimple_seq_add_stmt (gimple_omp_body_ptr (is_for_simd
? scan_stmt4 : scan_stmt2), g);
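/* Control temporaries for the scan: a bool and a pointer, attached
through _scantemp_ control clauses to the clause lists of both
loops.  */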
tree controlb = create_tmp_var (boolean_type_node);
tree controlp = create_tmp_var (ptr_type_node);
tree nc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SCANTEMP_);
OMP_CLAUSE_DECL (nc) = controlb;
OMP_CLAUSE__SCANTEMP__CONTROL (nc) = 1;
*cp1 = nc;
cp1 = &OMP_CLAUSE_CHAIN (nc);
nc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SCANTEMP_);
OMP_CLAUSE_DECL (nc) = controlp;
OMP_CLAUSE__SCANTEMP__CONTROL (nc) = 1;
*cp1 = nc;
cp1 = &OMP_CLAUSE_CHAIN (nc);
nc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SCANTEMP_);
OMP_CLAUSE_DECL (nc) = controlb;
OMP_CLAUSE__SCANTEMP__CONTROL (nc) = 1;
*cp2 = nc;
cp2 = &OMP_CLAUSE_CHAIN (nc);
nc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SCANTEMP_);
OMP_CLAUSE_DECL (nc) = controlp;
OMP_CLAUSE__SCANTEMP__CONTROL (nc) = 1;
*cp2 = nc;
cp2 = &OMP_CLAUSE_CHAIN (nc);
*cp1 = gimple_omp_for_clauses (stmt);
gimple_omp_for_set_clauses (stmt, new_clauses1);
*cp2 = gimple_omp_for_clauses (new_stmt);
gimple_omp_for_set_clauses (new_stmt, new_clauses2);
if (is_for_simd)
{
gimple_seq_add_seq (gimple_omp_body_ptr (scan_stmt3), scan1_list);
gimple_seq_add_seq (gimple_omp_body_ptr (input_stmt4), input2_list);
gsi_insert_seq_after (&input3_gsi, gimple_omp_body (input_stmt3),
GSI_SAME_STMT);
gsi_remove (&input3_gsi, true);
gsi_insert_seq_after (&scan3_gsi, gimple_omp_body (scan_stmt3),
GSI_SAME_STMT);
gsi_remove (&scan3_gsi, true);
gsi_insert_seq_after (&input4_gsi, gimple_omp_body (input_stmt4),
GSI_SAME_STMT);
gsi_remove (&input4_gsi, true);
gsi_insert_seq_after (&scan4_gsi, gimple_omp_body (scan_stmt4),
GSI_SAME_STMT);
gsi_remove (&scan4_gsi, true);
}
else
{
gimple_omp_set_body (scan_stmt1, scan1_list);
gimple_omp_set_body (input_stmt2, input2_list);
}
gsi_insert_seq_after (&input1_gsi, gimple_omp_body (input_stmt1),
GSI_SAME_STMT);
gsi_remove (&input1_gsi, true);
gsi_insert_seq_after (&scan1_gsi, gimple_omp_body (scan_stmt1),
GSI_SAME_STMT);
gsi_remove (&scan1_gsi, true);
gsi_insert_seq_after (&input2_gsi, gimple_omp_body (input_stmt2),
GSI_SAME_STMT);
gsi_remove (&input2_gsi, true);
gsi_insert_seq_after (&scan2_gsi, gimple_omp_body (scan_stmt2),
GSI_SAME_STMT);
gsi_remove (&scan2_gsi, true);
gimple_seq_add_seq (body_p, clist);
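/* First phase: thread 0 runs THR01_LIST, the other threads
THRN1_LIST; then IVAR is zeroed and the input loop is emitted,
followed by MDLIST after its OMP return.  */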
tree lab1 = create_artificial_label (UNKNOWN_LOCATION);
tree lab2 = create_artificial_label (UNKNOWN_LOCATION);
tree lab3 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (EQ_EXPR, thread_num, integer_zero_node, lab1, lab2);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab1);
gimple_seq_add_stmt (body_p, g);
gimple_seq_add_seq (body_p, thr01_list);
g = gimple_build_goto (lab3);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab2);
gimple_seq_add_stmt (body_p, g);
gimple_seq_add_seq (body_p, thrn1_list);
g = gimple_build_label (lab3);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (ivar, size_zero_node);
gimple_seq_add_stmt (body_p, g);
gimple_seq_add_stmt (body_p, stmt);
gimple_seq_add_seq (body_p, body);
gimple_seq_add_stmt (body_p, gimple_build_omp_continue (fd->loop.v,
fd->loop.v));
g = gimple_build_omp_return (true);
gimple_seq_add_stmt (body_p, g);
gimple_seq_add_seq (body_p, mdlist);
lab1 = create_artificial_label (UNKNOWN_LOCATION);
lab2 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (GT_EXPR, num_threads, integer_one_node, lab1, lab2);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab1);
gimple_seq_add_stmt (body_p, g);
g = omp_build_barrier (NULL);
gimple_seq_add_stmt (body_p, g);
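/* Combine the per-thread partials in RPRIVA[] with a tree reduction:
K doubles while DOWN is zero (up-sweep), then halves once 2K would
exceed the thread count (down-sweep).  In each round the threads for
which L = (thread_num + 1) * 2K + (K & DOWN) - 1 is below NUM_THREADS
perform RPRIVA[L] = RPRIVA[L - K] op RPRIVA[L] (REDUC_LIST), with a
barrier between rounds.  */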
tree down = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (down, build_zero_cst (unsigned_type_node));
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (k, build_one_cst (unsigned_type_node));
gimple_seq_add_stmt (body_p, g);
tree num_threadsu = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (num_threadsu, NOP_EXPR, num_threads);
gimple_seq_add_stmt (body_p, g);
tree thread_numu = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (thread_numu, NOP_EXPR, thread_num);
gimple_seq_add_stmt (body_p, g);
tree thread_nump1 = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (thread_nump1, PLUS_EXPR, thread_numu,
build_int_cst (unsigned_type_node, 1));
gimple_seq_add_stmt (body_p, g);
lab3 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_label (lab3);
gimple_seq_add_stmt (body_p, g);
tree twok = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (twok, LSHIFT_EXPR, k, integer_one_node);
gimple_seq_add_stmt (body_p, g);
tree lab4 = create_artificial_label (UNKNOWN_LOCATION);
tree lab5 = create_artificial_label (UNKNOWN_LOCATION);
tree lab6 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (GT_EXPR, twok, num_threadsu, lab4, lab5);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab4);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (down, build_all_ones_cst (unsigned_type_node));
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (k, RSHIFT_EXPR, k, integer_one_node);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_cond (EQ_EXPR, k, num_threadsu, lab6, lab5);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab6);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (k, RSHIFT_EXPR, k, integer_one_node);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab5);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (twok, LSHIFT_EXPR, k, integer_one_node);
gimple_seq_add_stmt (body_p, g);
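/* (thread_num + 1) * 2K can wrap around; compute it with
IFN_MUL_OVERFLOW and skip the combine step when the multiplication
overflowed.  */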
tree cplx = create_tmp_var (build_complex_type (unsigned_type_node, false));
DECL_GIMPLE_REG_P (cplx) = 1;
g = gimple_build_call_internal (IFN_MUL_OVERFLOW, 2, thread_nump1, twok);
gimple_call_set_lhs (g, cplx);
gimple_seq_add_stmt (body_p, g);
tree mul = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (mul, REALPART_EXPR,
build1 (REALPART_EXPR, unsigned_type_node, cplx));
gimple_seq_add_stmt (body_p, g);
tree ovf = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (ovf, IMAGPART_EXPR,
build1 (IMAGPART_EXPR, unsigned_type_node, cplx));
gimple_seq_add_stmt (body_p, g);
tree lab7 = create_artificial_label (UNKNOWN_LOCATION);
tree lab8 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (EQ_EXPR, ovf, build_zero_cst (unsigned_type_node),
lab7, lab8);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab7);
gimple_seq_add_stmt (body_p, g);
tree andv = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (andv, BIT_AND_EXPR, k, down);
gimple_seq_add_stmt (body_p, g);
tree andvm1 = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (andvm1, PLUS_EXPR, andv,
build_minus_one_cst (unsigned_type_node));
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (l, PLUS_EXPR, mul, andvm1);
gimple_seq_add_stmt (body_p, g);
tree lab9 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (LT_EXPR, l, num_threadsu, lab9, lab8);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab9);
gimple_seq_add_stmt (body_p, g);
gimple_seq_add_seq (body_p, reduc_list);
g = gimple_build_label (lab8);
gimple_seq_add_stmt (body_p, g);
tree lab10 = create_artificial_label (UNKNOWN_LOCATION);
tree lab11 = create_artificial_label (UNKNOWN_LOCATION);
tree lab12 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (EQ_EXPR, down, build_zero_cst (unsigned_type_node),
lab10, lab11);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab10);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (k, LSHIFT_EXPR, k, integer_one_node);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_goto (lab12);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab11);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (k, RSHIFT_EXPR, k, integer_one_node);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab12);
gimple_seq_add_stmt (body_p, g);
g = omp_build_barrier (NULL);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_cond (NE_EXPR, k, build_zero_cst (unsigned_type_node),
lab3, lab2);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab2);
gimple_seq_add_stmt (body_p, g);
lab1 = create_artificial_label (UNKNOWN_LOCATION);
lab2 = create_artificial_label (UNKNOWN_LOCATION);
lab3 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (EQ_EXPR, thread_num, integer_zero_node, lab1, lab2);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab1);
gimple_seq_add_stmt (body_p, g);
gimple_seq_add_seq (body_p, thr02_list);
g = gimple_build_goto (lab3);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab2);
gimple_seq_add_stmt (body_p, g);
gimple_seq_add_seq (body_p, thrn2_list);
g = gimple_build_label (lab3);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (ivar, size_zero_node);
gimple_seq_add_stmt (body_p, g);
gimple_seq_add_stmt (body_p, new_stmt);
gimple_seq_add_seq (body_p, new_body);
gimple_seq new_dlist = NULL;
lab1 = create_artificial_label (UNKNOWN_LOCATION);
lab2 = create_artificial_label (UNKNOWN_LOCATION);
tree num_threadsm1 = create_tmp_var (integer_type_node);
g = gimple_build_assign (num_threadsm1, PLUS_EXPR, num_threads,
integer_minus_one_node);
gimple_seq_add_stmt (&new_dlist, g);
g = gimple_build_cond (EQ_EXPR, thread_num, num_threadsm1, lab1, lab2);
gimple_seq_add_stmt (&new_dlist, g);
g = gimple_build_label (lab1);
gimple_seq_add_stmt (&new_dlist, g);
gimple_seq_add_seq (&new_dlist, last_list);
g = gimple_build_label (lab2);
gimple_seq_add_stmt (&new_dlist, g);
gimple_seq_add_seq (&new_dlist, *dlist);
*dlist = new_dlist;
}
/* Lower code for an OMP loop directive. */
static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree *rhs_p, block;
struct omp_for_data fd, *fdp = NULL;
gomp_for *stmt = as_a <gomp_for *> (gsi_stmt (*gsi_p));
gbind *new_stmt;
gimple_seq omp_for_body, body, dlist, tred_ilist = NULL, tred_dlist = NULL;
gimple_seq cnt_list = NULL, clist = NULL;
gimple_seq oacc_head = NULL, oacc_tail = NULL;
size_t i;
push_gimplify_context ();
lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
block = make_node (BLOCK);
new_stmt = gimple_build_bind (NULL, NULL, block);
/* Replace at gsi right away, so that 'stmt' is no longer a member
of a sequence: we're going to add it to a different one below. */
gsi_replace (gsi_p, new_stmt, true);
/* Move declarations of temporaries out of the loop body before we
make it go away. */
omp_for_body = gimple_omp_body (stmt);
if (!gimple_seq_empty_p (omp_for_body)
&& gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
{
gbind *inner_bind
= as_a <gbind *> (gimple_seq_first_stmt (omp_for_body));
tree vars = gimple_bind_vars (inner_bind);
gimple_bind_append_vars (new_stmt, vars);
/* bind_vars/BLOCK_VARS are being moved to new_stmt/block; don't
keep them on the inner_bind and its block. */
gimple_bind_set_vars (inner_bind, NULL_TREE);
if (gimple_bind_block (inner_bind))
BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
}
if (gimple_omp_for_combined_into_p (stmt))
{
omp_extract_for_data (stmt, &fd, NULL);
fdp = &fd;
/* We need two temporaries with fd.loop.v type (istart/iend)
and then (fd.collapse - 1) temporaries with the same
type for count2 ... countN-1 vars if not constant. */
size_t count = 2;
tree type = fd.iter_type;
if (fd.collapse > 1
&& TREE_CODE (fd.loop.n2) != INTEGER_CST)
count += fd.collapse - 1;
bool taskreg_for
= (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR
|| gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_TASKLOOP);
tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
tree simtc = NULL;
tree clauses = *pc;
if (taskreg_for)
outerc
= omp_find_clause (gimple_omp_taskreg_clauses (ctx->outer->stmt),
OMP_CLAUSE__LOOPTEMP_);
if (ctx->simt_stmt)
simtc = omp_find_clause (gimple_omp_for_clauses (ctx->simt_stmt),
OMP_CLAUSE__LOOPTEMP_);
for (i = 0; i < count; i++)
{
tree temp;
if (taskreg_for)
{
gcc_assert (outerc);
temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
outerc = omp_find_clause (OMP_CLAUSE_CHAIN (outerc),
OMP_CLAUSE__LOOPTEMP_);
}
else
{
/* If there are two adjacent SIMD stmts, one with a _simt_
clause and one without, make sure they have the same decls in
_looptemp_ clauses, because the outer stmt they are combined
into will look up just one inner_stmt. */
if (ctx->simt_stmt)
temp = OMP_CLAUSE_DECL (simtc);
else
temp = create_tmp_var (type);
insert_decl_map (&ctx->outer->cb, temp, temp);
}
*pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
OMP_CLAUSE_DECL (*pc) = temp;
pc = &OMP_CLAUSE_CHAIN (*pc);
if (ctx->simt_stmt)
simtc = omp_find_clause (OMP_CLAUSE_CHAIN (simtc),
OMP_CLAUSE__LOOPTEMP_);
}
*pc = clauses;
}
/* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
dlist = NULL;
body = NULL;
tree rclauses
= omp_task_reductions_find_first (gimple_omp_for_clauses (stmt), OMP_FOR,
OMP_CLAUSE_REDUCTION);
tree rtmp = NULL_TREE;
if (rclauses)
{
tree type = build_pointer_type (pointer_sized_int_node);
tree temp = create_tmp_var (type);
tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__REDUCTEMP_);
OMP_CLAUSE_DECL (c) = temp;
OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (stmt);
gimple_omp_for_set_clauses (stmt, c);
lower_omp_task_reductions (ctx, OMP_FOR,
gimple_omp_for_clauses (stmt),
&tred_ilist, &tred_dlist);
rclauses = c;
rtmp = make_ssa_name (type);
gimple_seq_add_stmt (&body, gimple_build_assign (rtmp, temp));
}
lower_lastprivate_conditional_clauses (gimple_omp_for_clauses_ptr (stmt),
ctx);
lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
fdp);
gimple_seq_add_seq (rclauses ? &tred_ilist : &body,
gimple_omp_for_pre_body (stmt));
lower_omp (gimple_omp_body_ptr (stmt), ctx);
/* Lower the header expressions. At this point, we can assume that
the header is of the form:
#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
We just need to make sure that VAL1, VAL2 and VAL3 are lowered
using the .omp_data_s mapping, if needed. */
for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
{
rhs_p = gimple_omp_for_initial_ptr (stmt, i);
if (!is_gimple_min_invariant (*rhs_p))
*rhs_p = get_formal_tmp_var (*rhs_p, &cnt_list);
else if (TREE_CODE (*rhs_p) == ADDR_EXPR)
recompute_tree_invariant_for_addr_expr (*rhs_p);
rhs_p = gimple_omp_for_final_ptr (stmt, i);
if (!is_gimple_min_invariant (*rhs_p))
*rhs_p = get_formal_tmp_var (*rhs_p, &cnt_list);
else if (TREE_CODE (*rhs_p) == ADDR_EXPR)
recompute_tree_invariant_for_addr_expr (*rhs_p);
rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
if (!is_gimple_min_invariant (*rhs_p))
*rhs_p = get_formal_tmp_var (*rhs_p, &cnt_list);
}
if (rclauses)
gimple_seq_add_seq (&tred_ilist, cnt_list);
else
gimple_seq_add_seq (&body, cnt_list);
/* Once lowered, extract the bounds and clauses. */
omp_extract_for_data (stmt, &fd, NULL);
if (is_gimple_omp_oacc (ctx->stmt)
&& !ctx_in_oacc_kernels_region (ctx))
lower_oacc_head_tail (gimple_location (stmt),
gimple_omp_for_clauses (stmt),
&oacc_head, &oacc_tail, ctx);
/* Add OpenACC partitioning and reduction markers just before the loop. */
if (oacc_head)
gimple_seq_add_seq (&body, oacc_head);
lower_omp_for_lastprivate (&fd, &body, &dlist, &clist, ctx);
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
{
OMP_CLAUSE_DECL (c) = lookup_decl (OMP_CLAUSE_DECL (c), ctx);
if (DECL_P (OMP_CLAUSE_LINEAR_STEP (c)))
OMP_CLAUSE_LINEAR_STEP (c)
= maybe_lookup_decl_in_outer_ctx (OMP_CLAUSE_LINEAR_STEP (c),
ctx);
}
bool phony_loop = (gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_GRID_LOOP
&& gimple_omp_for_grid_phony (stmt));
if ((ctx->scan_inclusive || ctx->scan_exclusive)
&& gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
{
gcc_assert (!phony_loop);
lower_omp_for_scan (&body, &dlist, stmt, &fd, ctx);
}
else
{
if (!phony_loop)
gimple_seq_add_stmt (&body, stmt);
gimple_seq_add_seq (&body, gimple_omp_body (stmt));
}
if (!phony_loop)
gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
fd.loop.v));
/* After the loop, add exit clauses. */
lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, &clist, ctx);
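/* CLIST holds reduction code that must run inside a single
GOMP_atomic_start/end pair; emit it wrapped in those calls.  */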
if (clist)
{
tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
gcall *g = gimple_build_call (fndecl, 0);
gimple_seq_add_stmt (&body, g);
gimple_seq_add_seq (&body, clist);
fndecl = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
g = gimple_build_call (fndecl, 0);
gimple_seq_add_stmt (&body, g);
}
if (ctx->cancellable)
gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
gimple_seq_add_seq (&body, dlist);
if (rclauses)
{
gimple_seq_add_seq (&tred_ilist, body);
body = tred_ilist;
}
body = maybe_catch_exception (body);
if (!phony_loop)
{
/* Region exit marker goes at the end of the loop body. */
gimple *g = gimple_build_omp_return (fd.have_nowait);
gimple_seq_add_stmt (&body, g);
gimple_seq_add_seq (&body, tred_dlist);
maybe_add_implicit_barrier_cancel (ctx, g, &body);
if (rclauses)
OMP_CLAUSE_DECL (rclauses) = rtmp;
}
/* Add OpenACC joining and reduction markers just after the loop. */
if (oacc_tail)
gimple_seq_add_seq (&body, oacc_tail);
pop_gimplify_context (new_stmt);
gimple_bind_append_vars (new_stmt, ctx->block_vars);
maybe_remove_omp_member_access_dummy_vars (new_stmt);
BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
if (BLOCK_VARS (block))
TREE_USED (block) = 1;
gimple_bind_set_body (new_stmt, body);
gimple_omp_set_body (stmt, NULL);
gimple_omp_for_set_pre_body (stmt, NULL);
}
/* Callback for walk_stmts. Check if the current statement only contains
GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
static tree
check_combined_parallel (gimple_stmt_iterator *gsi_p,
bool *handled_ops_p,
struct walk_stmt_info *wi)
{
int *info = (int *) wi->info;
gimple *stmt = gsi_stmt (*gsi_p);
*handled_ops_p = true;
switch (gimple_code (stmt))
{
WALK_SUBSTMTS;
case GIMPLE_DEBUG:
break;
case GIMPLE_OMP_FOR:
case GIMPLE_OMP_SECTIONS:
*info = *info == 0 ? 1 : -1;
break;
default:
*info = -1;
break;
}
return NULL;
}
struct omp_taskcopy_context
{
/* This field must be at the beginning, as we do "inheritance": some
callback functions for tree-inline.c (e.g., omp_copy_decl)
receive a copy_body_data pointer that is up-cast to an
omp_taskcopy_context pointer. */
copy_body_data cb;
omp_context *ctx;
};
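/* copy_body callback: decls recorded in the context's sender field
map (sfield_map) get a fresh temporary; everything else is returned
unchanged.  */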
static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
return create_tmp_var (TREE_TYPE (var));
return var;
}
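/* Copy ORIG_TYPE for the task copyfn, remapping each field's type,
size and offset through TCCTX; needed when the record contains
variably modified types.  */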
static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
tree name, new_fields = NULL, type, f;
type = lang_hooks.types.make_type (RECORD_TYPE);
name = DECL_NAME (TYPE_NAME (orig_type));
name = build_decl (gimple_location (tcctx->ctx->stmt),
TYPE_DECL, name, type);
TYPE_NAME (type) = name;
for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
{
tree new_f = copy_node (f);
DECL_CONTEXT (new_f) = type;
TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
TREE_CHAIN (new_f) = new_fields;
walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
&tcctx->cb, NULL);
new_fields = new_f;
tcctx->cb.decl_map->put (f, new_f);
}
TYPE_FIELDS (type) = nreverse (new_fields);
layout_type (type);
return type;
}
/* Create the task copyfn, the helper that copies shared pointers and
copy-constructs firstprivate data from the parent's data block (SARG)
into the task's own block (ARG). */
static void
create_task_copyfn (gomp_task *task_stmt, omp_context *ctx)
{
struct function *child_cfun;
tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
tree record_type, srecord_type, bind, list;
bool record_needs_remap = false, srecord_needs_remap = false;
splay_tree_node n;
struct omp_taskcopy_context tcctx;
location_t loc = gimple_location (task_stmt);
size_t looptempno = 0;
child_fn = gimple_omp_task_copy_fn (task_stmt);
child_cfun = DECL_STRUCT_FUNCTION (child_fn);
gcc_assert (child_cfun->cfg == NULL);
DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
/* Reset DECL_CONTEXT on function arguments. */
for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
DECL_CONTEXT (t) = child_fn;
/* Populate the function. */
push_gimplify_context ();
push_cfun (child_cfun);
bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
TREE_SIDE_EFFECTS (bind) = 1;
list = NULL;
DECL_SAVED_TREE (child_fn) = bind;
DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
/* Remap src and dst argument types if needed. */
record_type = ctx->record_type;
srecord_type = ctx->srecord_type;
for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
{
record_needs_remap = true;
break;
}
for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
{
srecord_needs_remap = true;
break;
}
if (record_needs_remap || srecord_needs_remap)
{
memset (&tcctx, '\0', sizeof (tcctx));
tcctx.cb.src_fn = ctx->cb.src_fn;
tcctx.cb.dst_fn = child_fn;
tcctx.cb.src_node = cgraph_node::get (tcctx.cb.src_fn);
gcc_checking_assert (tcctx.cb.src_node);
tcctx.cb.dst_node = tcctx.cb.src_node;
tcctx.cb.src_cfun = ctx->cb.src_cfun;
tcctx.cb.copy_decl = task_copyfn_copy_decl;
tcctx.cb.eh_lp_nr = 0;
tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
tcctx.cb.decl_map = new hash_map<tree, tree>;
tcctx.ctx = ctx;
if (record_needs_remap)
record_type = task_copyfn_remap_type (&tcctx, record_type);
if (srecord_needs_remap)
srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
}
else
tcctx.cb.decl_map = NULL;
arg = DECL_ARGUMENTS (child_fn);
TREE_TYPE (arg) = build_pointer_type (record_type);
sarg = DECL_CHAIN (arg);
TREE_TYPE (sarg) = build_pointer_type (srecord_type);
/* First pass: initialize temporaries used in record_type and srecord_type
sizes and field offsets. */
if (tcctx.cb.decl_map)
for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
{
tree *p;
decl = OMP_CLAUSE_DECL (c);
p = tcctx.cb.decl_map->get (decl);
if (p == NULL)
continue;
n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
sf = (tree) n->value;
sf = *tcctx.cb.decl_map->get (sf);
src = build_simple_mem_ref_loc (loc, sarg);
src = omp_build_component_ref (src, sf);
t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
append_to_statement_list (t, &list);
}
/* Second pass: copy shared var pointers and copy construct non-VLA
firstprivate vars. */
for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
splay_tree_key key;
case OMP_CLAUSE_SHARED:
decl = OMP_CLAUSE_DECL (c);
key = (splay_tree_key) decl;
if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
key = (splay_tree_key) &DECL_UID (decl);
n = splay_tree_lookup (ctx->field_map, key);
if (n == NULL)
break;
f = (tree) n->value;
if (tcctx.cb.decl_map)
f = *tcctx.cb.decl_map->get (f);
n = splay_tree_lookup (ctx->sfield_map, key);
sf = (tree) n->value;
if (tcctx.cb.decl_map)
sf = *tcctx.cb.decl_map->get (sf);
src = build_simple_mem_ref_loc (loc, sarg);
src = omp_build_component_ref (src, sf);
dst = build_simple_mem_ref_loc (loc, arg);
dst = omp_build_component_ref (dst, f);
t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
append_to_statement_list (t, &list);
break;
case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_IN_REDUCTION:
decl = OMP_CLAUSE_DECL (c);
if (TREE_CODE (decl) == MEM_REF)
{
decl = TREE_OPERAND (decl, 0);
if (TREE_CODE (decl) == POINTER_PLUS_EXPR)
decl = TREE_OPERAND (decl, 0);
if (TREE_CODE (decl) == INDIRECT_REF
|| TREE_CODE (decl) == ADDR_EXPR)
decl = TREE_OPERAND (decl, 0);
}
key = (splay_tree_key) decl;
n = splay_tree_lookup (ctx->field_map, key);
if (n == NULL)
break;
f = (tree) n->value;
if (tcctx.cb.decl_map)
f = *tcctx.cb.decl_map->get (f);
n = splay_tree_lookup (ctx->sfield_map, key);
sf = (tree) n->value;
if (tcctx.cb.decl_map)
sf = *tcctx.cb.decl_map->get (sf);
src = build_simple_mem_ref_loc (loc, sarg);
src = omp_build_component_ref (src, sf);
if (decl != OMP_CLAUSE_DECL (c)
&& TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE
&& TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) == POINTER_TYPE)
src = build_simple_mem_ref_loc (loc, src);
dst = build_simple_mem_ref_loc (loc, arg);
dst = omp_build_component_ref (dst, f);
t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
append_to_statement_list (t, &list);
break;
case OMP_CLAUSE__LOOPTEMP_:
/* Fields for first two _looptemp_ clauses are initialized by
GOMP_taskloop*, the rest are handled like firstprivate. */
if (looptempno < 2)
{
looptempno++;
break;
}
/* FALLTHRU */
case OMP_CLAUSE__REDUCTEMP_:
case OMP_CLAUSE_FIRSTPRIVATE:
decl = OMP_CLAUSE_DECL (c);
if (is_variable_sized (decl))
break;
n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
if (n == NULL)
break;
f = (tree) n->value;
if (tcctx.cb.decl_map)
f = *tcctx.cb.decl_map->get (f);
n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
if (n != NULL)
{
sf = (tree) n->value;
if (tcctx.cb.decl_map)
sf = *tcctx.cb.decl_map->get (sf);
src = build_simple_mem_ref_loc (loc, sarg);
src = omp_build_component_ref (src, sf);
if (use_pointer_for_field (decl, NULL) || omp_is_reference (decl))
src = build_simple_mem_ref_loc (loc, src);
}
else
src = decl;
dst = build_simple_mem_ref_loc (loc, arg);
dst = omp_build_component_ref (dst, f);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_FIRSTPRIVATE)
t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
else
t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
append_to_statement_list (t, &list);
break;
case OMP_CLAUSE_PRIVATE:
if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
break;
decl = OMP_CLAUSE_DECL (c);
n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
f = (tree) n->value;
if (tcctx.cb.decl_map)
f = *tcctx.cb.decl_map->get (f);
n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
if (n != NULL)
{
sf = (tree) n->value;
if (tcctx.cb.decl_map)
sf = *tcctx.cb.decl_map->get (sf);
src = build_simple_mem_ref_loc (loc, sarg);
src = omp_build_component_ref (src, sf);
if (use_pointer_for_field (decl, NULL))
src = build_simple_mem_ref_loc (loc, src);
}
else
src = decl;
dst = build_simple_mem_ref_loc (loc, arg);
dst = omp_build_component_ref (dst, f);
t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
append_to_statement_list (t, &list);
break;
default:
break;
}
/* Last pass: handle VLA firstprivates. */
if (tcctx.cb.decl_map)
for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
{
tree ind, ptr, df;
decl = OMP_CLAUSE_DECL (c);
if (!is_variable_sized (decl))
continue;
n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
if (n == NULL)
continue;
f = (tree) n->value;
f = *tcctx.cb.decl_map->get (f);
gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
ind = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
n = splay_tree_lookup (ctx->sfield_map,
(splay_tree_key) TREE_OPERAND (ind, 0));
sf = (tree) n->value;
sf = *tcctx.cb.decl_map->get (sf);
src = build_simple_mem_ref_loc (loc, sarg);
src = omp_build_component_ref (src, sf);
src = build_simple_mem_ref_loc (loc, src);
dst = build_simple_mem_ref_loc (loc, arg);
dst = omp_build_component_ref (dst, f);
t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
append_to_statement_list (t, &list);
n = splay_tree_lookup (ctx->field_map,
(splay_tree_key) TREE_OPERAND (ind, 0));
df = (tree) n->value;
df = *tcctx.cb.decl_map->get (df);
ptr = build_simple_mem_ref_loc (loc, arg);
ptr = omp_build_component_ref (ptr, df);
t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
build_fold_addr_expr_loc (loc, dst));
append_to_statement_list (t, &list);
}
t = build1 (RETURN_EXPR, void_type_node, NULL);
append_to_statement_list (t, &list);
if (tcctx.cb.decl_map)
delete tcctx.cb.decl_map;
pop_gimplify_context (NULL);
BIND_EXPR_BODY (bind) = list;
pop_cfun ();
}
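/* Lower depend clauses into the array form the runtime expects: a
count header (normally the total plus the out/inout count; five slots
led by 0 when mutexinoutset or depobj entries force the extended
layout), followed by the depend addresses grouped as out/inout,
mutexinoutset, in, depobj.  A new OMP_CLAUSE_DEPEND_LAST clause
pointing at the array is then prepended to the clause list.  */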
static void
lower_depend_clauses (tree *pclauses, gimple_seq *iseq, gimple_seq *oseq)
{
tree c, clauses;
gimple *g;
size_t cnt[4] = { 0, 0, 0, 0 }, idx = 2, i;
clauses = omp_find_clause (*pclauses, OMP_CLAUSE_DEPEND);
gcc_assert (clauses);
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
switch (OMP_CLAUSE_DEPEND_KIND (c))
{
case OMP_CLAUSE_DEPEND_LAST:
/* Lowering already done at gimplification. */
return;
case OMP_CLAUSE_DEPEND_IN:
cnt[2]++;
break;
case OMP_CLAUSE_DEPEND_OUT:
case OMP_CLAUSE_DEPEND_INOUT:
cnt[0]++;
break;
case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
cnt[1]++;
break;
case OMP_CLAUSE_DEPEND_DEPOBJ:
cnt[3]++;
break;
case OMP_CLAUSE_DEPEND_SOURCE:
case OMP_CLAUSE_DEPEND_SINK:
/* FALLTHRU */
default:
gcc_unreachable ();
}
if (cnt[1] || cnt[3])
idx = 5;
size_t total = cnt[0] + cnt[1] + cnt[2] + cnt[3];
tree type = build_array_type_nelts (ptr_type_node, total + idx);
tree array = create_tmp_var (type);
TREE_ADDRESSABLE (array) = 1;
tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
NULL_TREE);
if (idx == 5)
{
g = gimple_build_assign (r, build_int_cst (ptr_type_node, 0));
gimple_seq_add_stmt (iseq, g);
r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
NULL_TREE);
}
g = gimple_build_assign (r, build_int_cst (ptr_type_node, total));
gimple_seq_add_stmt (iseq, g);
for (i = 0; i < (idx == 5 ? 3 : 1); i++)
{
r = build4 (ARRAY_REF, ptr_type_node, array,
size_int (i + 1 + (idx == 5)), NULL_TREE, NULL_TREE);
g = gimple_build_assign (r, build_int_cst (ptr_type_node, cnt[i]));
gimple_seq_add_stmt (iseq, g);
}
for (i = 0; i < 4; i++)
{
if (cnt[i] == 0)
continue;
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
continue;
else
{
switch (OMP_CLAUSE_DEPEND_KIND (c))
{
case OMP_CLAUSE_DEPEND_IN:
if (i != 2)
continue;
break;
case OMP_CLAUSE_DEPEND_OUT:
case OMP_CLAUSE_DEPEND_INOUT:
if (i != 0)
continue;
break;
case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
if (i != 1)
continue;
break;
case OMP_CLAUSE_DEPEND_DEPOBJ:
if (i != 3)
continue;
break;
default:
gcc_unreachable ();
}
tree t = OMP_CLAUSE_DECL (c);
t = fold_convert (ptr_type_node, t);
gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
NULL_TREE, NULL_TREE);
g = gimple_build_assign (r, t);
gimple_seq_add_stmt (iseq, g);
}
}
c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
OMP_CLAUSE_DEPEND_KIND (c) = OMP_CLAUSE_DEPEND_LAST;
OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
OMP_CLAUSE_CHAIN (c) = *pclauses;
*pclauses = c;
tree clobber = build_clobber (type);
g = gimple_build_assign (array, clobber);
gimple_seq_add_stmt (oseq, g);
}
/* Lower the OpenMP parallel or task directive in the current statement
in GSI_P. CTX holds context information for the directive. */
static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree clauses;
tree child_fn, t;
gimple *stmt = gsi_stmt (*gsi_p);
gbind *par_bind, *bind, *dep_bind = NULL;
gimple_seq par_body;
location_t loc = gimple_location (stmt);
clauses = gimple_omp_taskreg_clauses (stmt);
if (gimple_code (stmt) == GIMPLE_OMP_TASK
&& gimple_omp_task_taskwait_p (stmt))
{
par_bind = NULL;
par_body = NULL;
}
else
{
par_bind
= as_a <gbind *> (gimple_seq_first_stmt (gimple_omp_body (stmt)));
par_body = gimple_bind_body (par_bind);
}
child_fn = ctx->cb.dst_fn;
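/* Mark a parallel as combined if its body consists of exactly one
worksharing construct (GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS) and,
apart from debug stmts, nothing else.  */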
if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
&& !gimple_omp_parallel_combined_p (stmt))
{
struct walk_stmt_info wi;
int ws_num = 0;
memset (&wi, 0, sizeof (wi));
wi.info = &ws_num;
wi.val_only = true;
walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
if (ws_num == 1)
gimple_omp_parallel_set_combined_p (stmt, true);
}
gimple_seq dep_ilist = NULL;
gimple_seq dep_olist = NULL;
if (gimple_code (stmt) == GIMPLE_OMP_TASK
&& omp_find_clause (clauses, OMP_CLAUSE_DEPEND))
{
push_gimplify_context ();
dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
lower_depend_clauses (gimple_omp_task_clauses_ptr (stmt),
&dep_ilist, &dep_olist);
}
if (gimple_code (stmt) == GIMPLE_OMP_TASK
&& gimple_omp_task_taskwait_p (stmt))
{
if (dep_bind)
{
gsi_replace (gsi_p, dep_bind, true);
gimple_bind_add_seq (dep_bind, dep_ilist);
gimple_bind_add_stmt (dep_bind, stmt);
gimple_bind_add_seq (dep_bind, dep_olist);
pop_gimplify_context (dep_bind);
}
return;
}
if (ctx->srecord_type)
create_task_copyfn (as_a <gomp_task *> (stmt), ctx);
gimple_seq tskred_ilist = NULL;
gimple_seq tskred_olist = NULL;
if ((is_task_ctx (ctx)
&& gimple_omp_task_taskloop_p (ctx->stmt)
&& omp_find_clause (gimple_omp_task_clauses (ctx->stmt),
OMP_CLAUSE_REDUCTION))
|| (is_parallel_ctx (ctx)
&& omp_find_clause (gimple_omp_parallel_clauses (stmt),
OMP_CLAUSE__REDUCTEMP_)))
{
if (dep_bind == NULL)
{
push_gimplify_context ();
dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
}
lower_omp_task_reductions (ctx, is_task_ctx (ctx) ? OMP_TASKLOOP
: OMP_PARALLEL,
gimple_omp_taskreg_clauses (ctx->stmt),
&tskred_ilist, &tskred_olist);
}
push_gimplify_context ();
gimple_seq par_olist = NULL;
gimple_seq par_ilist = NULL;
gimple_seq par_rlist = NULL;
bool phony_construct = gimple_code (stmt) == GIMPLE_OMP_PARALLEL
&& gimple_omp_parallel_grid_phony (as_a <gomp_parallel *> (stmt));
if (phony_construct && ctx->record_type)
{
gcc_checking_assert (!ctx->receiver_decl);
ctx->receiver_decl = create_tmp_var
(build_reference_type (ctx->record_type), ".omp_rec");
}
lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
lower_omp (&par_body, ctx);
if (gimple_code (stmt) != GIMPLE_OMP_TASK)
lower_reduction_clauses (clauses, &par_rlist, NULL, ctx);
/* Declare all the variables created by mapping and the variables
declared in the scope of the parallel body. */
record_vars_into (ctx->block_vars, child_fn);
maybe_remove_omp_member_access_dummy_vars (par_bind);
record_vars_into (gimple_bind_vars (par_bind), child_fn);
if (ctx->record_type)
{
ctx->sender_decl
= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
: ctx->record_type, ".omp_data_o");
DECL_NAMELESS (ctx->sender_decl) = 1;
TREE_ADDRESSABLE (ctx->sender_decl) = 1;
gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
}
gimple_seq olist = NULL;
gimple_seq ilist = NULL;
lower_send_clauses (clauses, &ilist, &olist, ctx);
lower_send_shared_vars (&ilist, &olist, ctx);
if (ctx->record_type)
{
tree clobber = build_clobber (TREE_TYPE (ctx->sender_decl));
gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
clobber));
}
/* Once all the expansions are done, sequence all the different
fragments inside gimple_omp_body. */
gimple_seq new_body = NULL;
if (ctx->record_type)
{
t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
/* fixup_child_record_type might have changed receiver_decl's type. */
t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (ctx->receiver_decl, t));
}
gimple_seq_add_seq (&new_body, par_ilist);
gimple_seq_add_seq (&new_body, par_body);
gimple_seq_add_seq (&new_body, par_rlist);
if (ctx->cancellable)
gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
gimple_seq_add_seq (&new_body, par_olist);
new_body = maybe_catch_exception (new_body);
if (gimple_code (stmt) == GIMPLE_OMP_TASK)
gimple_seq_add_stmt (&new_body,
gimple_build_omp_continue (integer_zero_node,
integer_zero_node));
if (!phony_construct)
{
gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
gimple_omp_set_body (stmt, new_body);
}
if (dep_bind && gimple_bind_block (par_bind) == NULL_TREE)
bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
else
bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
gimple_bind_add_seq (bind, ilist);
if (!phony_construct)
gimple_bind_add_stmt (bind, stmt);
else
gimple_bind_add_seq (bind, new_body);
gimple_bind_add_seq (bind, olist);
pop_gimplify_context (NULL);
if (dep_bind)
{
gimple_bind_add_seq (dep_bind, dep_ilist);
gimple_bind_add_seq (dep_bind, tskred_ilist);
gimple_bind_add_stmt (dep_bind, bind);
gimple_bind_add_seq (dep_bind, tskred_olist);
gimple_bind_add_seq (dep_bind, dep_olist);
pop_gimplify_context (dep_bind);
}
}
/* Lower the GIMPLE_OMP_TARGET in the current statement
in GSI_P. CTX holds context information for the directive. */
static void
lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree clauses;
tree child_fn, t, c;
gomp_target *stmt = as_a <gomp_target *> (gsi_stmt (*gsi_p));
gbind *tgt_bind, *bind, *dep_bind = NULL;
gimple_seq tgt_body, olist, ilist, fplist, new_body;
location_t loc = gimple_location (stmt);
bool offloaded, data_region;
unsigned int map_cnt = 0;
offloaded = is_gimple_omp_offloaded (stmt);
switch (gimple_omp_target_kind (stmt))
{
case GF_OMP_TARGET_KIND_REGION:
case GF_OMP_TARGET_KIND_UPDATE:
case GF_OMP_TARGET_KIND_ENTER_DATA:
case GF_OMP_TARGET_KIND_EXIT_DATA:
case GF_OMP_TARGET_KIND_OACC_PARALLEL:
case GF_OMP_TARGET_KIND_OACC_KERNELS:
case GF_OMP_TARGET_KIND_OACC_SERIAL:
case GF_OMP_TARGET_KIND_OACC_UPDATE:
case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
case GF_OMP_TARGET_KIND_OACC_DECLARE:
data_region = false;
break;
case GF_OMP_TARGET_KIND_DATA:
case GF_OMP_TARGET_KIND_OACC_DATA:
case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
data_region = true;
break;
default:
gcc_unreachable ();
}
clauses = gimple_omp_target_clauses (stmt);
gimple_seq dep_ilist = NULL;
gimple_seq dep_olist = NULL;
if (omp_find_clause (clauses, OMP_CLAUSE_DEPEND))
{
push_gimplify_context ();
dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
lower_depend_clauses (gimple_omp_target_clauses_ptr (stmt),
&dep_ilist, &dep_olist);
}
tgt_bind = NULL;
tgt_body = NULL;
if (offloaded)
{
tgt_bind = gimple_seq_first_stmt_as_a_bind (gimple_omp_body (stmt));
tgt_body = gimple_bind_body (tgt_bind);
}
else if (data_region)
tgt_body = gimple_omp_body (stmt);
child_fn = ctx->cb.dst_fn;
push_gimplify_context ();
fplist = NULL;
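/* First pass over the clauses: count the map entries in MAP_CNT and
set up DECL_VALUE_EXPRs so privatized and mapped decls refer to their
receiver-side copies.  */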
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
tree var, x;
default:
break;
case OMP_CLAUSE_MAP:
#if CHECKING_P
/* First check what we're prepared to handle in the following. */
switch (OMP_CLAUSE_MAP_KIND (c))
{
case GOMP_MAP_ALLOC:
case GOMP_MAP_TO:
case GOMP_MAP_FROM:
case GOMP_MAP_TOFROM:
case GOMP_MAP_POINTER:
case GOMP_MAP_TO_PSET:
case GOMP_MAP_DELETE:
case GOMP_MAP_RELEASE:
case GOMP_MAP_ALWAYS_TO:
case GOMP_MAP_ALWAYS_FROM:
case GOMP_MAP_ALWAYS_TOFROM:
case GOMP_MAP_FIRSTPRIVATE_POINTER:
case GOMP_MAP_FIRSTPRIVATE_REFERENCE:
case GOMP_MAP_STRUCT:
case GOMP_MAP_ALWAYS_POINTER:
break;
case GOMP_MAP_IF_PRESENT:
case GOMP_MAP_FORCE_ALLOC:
case GOMP_MAP_FORCE_TO:
case GOMP_MAP_FORCE_FROM:
case GOMP_MAP_FORCE_TOFROM:
case GOMP_MAP_FORCE_PRESENT:
case GOMP_MAP_FORCE_DEVICEPTR:
case GOMP_MAP_DEVICE_RESIDENT:
case GOMP_MAP_LINK:
case GOMP_MAP_ATTACH:
case GOMP_MAP_DETACH:
case GOMP_MAP_FORCE_DETACH:
gcc_assert (is_gimple_omp_oacc (stmt));
break;
default:
gcc_unreachable ();
}
#endif
/* FALLTHRU */
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
oacc_firstprivate:
var = OMP_CLAUSE_DECL (c);
if (!DECL_P (var))
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
|| (!OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
&& (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_POINTER)))
map_cnt++;
continue;
}
if (DECL_SIZE (var)
&& TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
{
tree var2 = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
var2 = TREE_OPERAND (var2, 0);
gcc_assert (DECL_P (var2));
var = var2;
}
if (offloaded
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE))
{
if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
{
if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx))
&& varpool_node::get_create (var)->offloadable)
continue;
tree type = build_pointer_type (TREE_TYPE (var));
tree new_var = lookup_decl (var, ctx);
x = create_tmp_var_raw (type, get_name (new_var));
gimple_add_tmp_var (x);
x = build_simple_mem_ref (x);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
continue;
}
if (!maybe_lookup_field (var, ctx))
continue;
/* Don't remap compute constructs' reduction variables, because the
intermediate result must be local to each gang. */
if (offloaded && !(OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_IN_REDUCTION (c)))
{
x = build_receiver_ref (var, true, ctx);
tree new_var = lookup_decl (var, ctx);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
&& !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
&& TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
x = build_simple_mem_ref (x);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
{
gcc_assert (is_gimple_omp_oacc (ctx->stmt));
if (omp_is_reference (new_var)
&& (TREE_CODE (TREE_TYPE (new_var)) != POINTER_TYPE
|| DECL_BY_REFERENCE (var)))
{
/* Create a local object to hold the instance
value. */
tree type = TREE_TYPE (TREE_TYPE (new_var));
const char *id = IDENTIFIER_POINTER (DECL_NAME (new_var));
tree inst = create_tmp_var (type, id);
gimplify_assign (inst, fold_indirect_ref (x), &fplist);
x = build_fold_addr_expr (inst);
}
gimplify_assign (new_var, x, &fplist);
}
else if (DECL_P (new_var))
{
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
else
gcc_unreachable ();
}
map_cnt++;
break;
case OMP_CLAUSE_FIRSTPRIVATE:
gcc_checking_assert (offloaded);
if (is_gimple_omp_oacc (ctx->stmt))
{
/* No 'firstprivate' clauses on OpenACC 'kernels'. */
gcc_checking_assert (!is_oacc_kernels (ctx));
goto oacc_firstprivate;
}
map_cnt++;
var = OMP_CLAUSE_DECL (c);
if (!omp_is_reference (var)
&& !is_gimple_reg_type (TREE_TYPE (var)))
{
tree new_var = lookup_decl (var, ctx);
if (is_variable_sized (var))
{
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_pvar = lookup_decl (pvar, ctx);
x = build_fold_indirect_ref (new_pvar);
TREE_THIS_NOTRAP (x) = 1;
}
else
x = build_receiver_ref (var, true, ctx);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
break;
case OMP_CLAUSE_PRIVATE:
gcc_checking_assert (offloaded);
if (is_gimple_omp_oacc (ctx->stmt))
{
/* No 'private' clauses on OpenACC 'kernels'. */
gcc_checking_assert (!is_oacc_kernels (ctx));
break;
}
var = OMP_CLAUSE_DECL (c);
if (is_variable_sized (var))
{
tree new_var = lookup_decl (var, ctx);
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_pvar = lookup_decl (pvar, ctx);
x = build_fold_indirect_ref (new_pvar);
TREE_THIS_NOTRAP (x) = 1;
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
break;
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_ADDR:
case OMP_CLAUSE_IS_DEVICE_PTR:
var = OMP_CLAUSE_DECL (c);
map_cnt++;
if (is_variable_sized (var))
{
tree new_var = lookup_decl (var, ctx);
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_pvar = lookup_decl (pvar, ctx);
x = build_fold_indirect_ref (new_pvar);
TREE_THIS_NOTRAP (x) = 1;
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
else if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_ADDR
&& !omp_is_reference (var)
&& !omp_is_allocatable_or_ptr (var)
&& !lang_hooks.decls.omp_array_data (var, true))
|| TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
{
tree new_var = lookup_decl (var, ctx);
tree type = build_pointer_type (TREE_TYPE (var));
x = create_tmp_var_raw (type, get_name (new_var));
gimple_add_tmp_var (x);
x = build_simple_mem_ref (x);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
else
{
tree new_var = lookup_decl (var, ctx);
x = create_tmp_var_raw (TREE_TYPE (new_var), get_name (new_var));
gimple_add_tmp_var (x);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
break;
}
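/* End of the first pass; MAP_CNT is now final.  Lower the offloaded
region or data region body.  */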
if (offloaded)
{
target_nesting_level++;
lower_omp (&tgt_body, ctx);
target_nesting_level--;
}
else if (data_region)
lower_omp (&tgt_body, ctx);
if (offloaded)
{
/* Declare all the variables created by mapping and the variables
declared in the scope of the target body. */
record_vars_into (ctx->block_vars, child_fn);
maybe_remove_omp_member_access_dummy_vars (tgt_bind);
record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
}
olist = NULL;
ilist = NULL;
if (ctx->record_type)
{
ctx->sender_decl
= create_tmp_var (ctx->record_type, ".omp_data_arr");
DECL_NAMELESS (ctx->sender_decl) = 1;
TREE_ADDRESSABLE (ctx->sender_decl) = 1;
t = make_tree_vec (3);
TREE_VEC_ELT (t, 0) = ctx->sender_decl;
TREE_VEC_ELT (t, 1)
= create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
".omp_data_sizes");
DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
tree tkind_type = short_unsigned_type_node;
int talign_shift = 8;
TREE_VEC_ELT (t, 2)
= create_tmp_var (build_array_type_nelts (tkind_type, map_cnt),
".omp_data_kinds");
DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
gimple_omp_target_set_data_arg (stmt, t);
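/* Second pass over the clauses, one slot per map entry: fill the
sender record and the .omp_data_sizes and .omp_data_kinds
constructors.  */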
vec<constructor_elt, va_gc> *vsize;
vec<constructor_elt, va_gc> *vkind;
vec_alloc (vsize, map_cnt);
vec_alloc (vkind, map_cnt);
unsigned int map_idx = 0;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
tree ovar, nc, s, purpose, var, x, type;
unsigned int talign;
default:
break;
case OMP_CLAUSE_MAP:
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
oacc_firstprivate_map:
nc = c;
ovar = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
break;
if (!DECL_P (ovar))
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
{
gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
== get_base_address (ovar));
nc = OMP_CLAUSE_CHAIN (c);
ovar = OMP_CLAUSE_DECL (nc);
}
else
{
tree x = build_sender_ref (ovar, ctx);
tree v
= build_fold_addr_expr_with_type (ovar, ptr_type_node);
gimplify_assign (x, v, &ilist);
nc = NULL_TREE;
}
}
else
{
if (DECL_SIZE (ovar)
&& TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
{
tree ovar2 = DECL_VALUE_EXPR (ovar);
gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
ovar2 = TREE_OPERAND (ovar2, 0);
gcc_assert (DECL_P (ovar2));
ovar = ovar2;
}
if (!maybe_lookup_field (ovar, ctx))
continue;
}
talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
talign = DECL_ALIGN_UNIT (ovar);
if (nc)
{
var = lookup_decl_in_outer_ctx (ovar, ctx);
x = build_sender_ref (ovar, ctx);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
&& !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
&& TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
{
gcc_assert (offloaded);
tree avar
= create_tmp_var (TREE_TYPE (TREE_TYPE (x)));
mark_addressable (avar);
gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
talign = DECL_ALIGN_UNIT (avar);
avar = build_fold_addr_expr (avar);
gimplify_assign (x, avar, &ilist);
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
{
gcc_assert (is_gimple_omp_oacc (ctx->stmt));
if (!omp_is_reference (var))
{
if (is_gimple_reg (var)
&& OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c))
TREE_NO_WARNING (var) = 1;
var = build_fold_addr_expr (var);
}
else
talign = TYPE_ALIGN_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
gimplify_assign (x, var, &ilist);
}
else if (is_gimple_reg (var))
{
gcc_assert (offloaded);
tree avar = create_tmp_var (TREE_TYPE (var));
mark_addressable (avar);
enum gomp_map_kind map_kind = OMP_CLAUSE_MAP_KIND (c);
if (GOMP_MAP_COPY_TO_P (map_kind)
|| map_kind == GOMP_MAP_POINTER
|| map_kind == GOMP_MAP_TO_PSET
|| map_kind == GOMP_MAP_FORCE_DEVICEPTR)
{
/* If we need to initialize a temporary
with VAR because it is not addressable, and
the variable hasn't been initialized yet, then
we'll get a warning for the store to avar.
Don't warn in that case, the mapping might
be implicit. */
TREE_NO_WARNING (var) = 1;
gimplify_assign (avar, var, &ilist);
}
avar = build_fold_addr_expr (avar);
gimplify_assign (x, avar, &ilist);
if ((GOMP_MAP_COPY_FROM_P (map_kind)
|| map_kind == GOMP_MAP_FORCE_DEVICEPTR)
&& !TYPE_READONLY (TREE_TYPE (var)))
{
x = unshare_expr (x);
x = build_simple_mem_ref (x);
gimplify_assign (var, x, &olist);
}
}
else
{
/* While MAP is handled explicitly by the FE,
for 'target update', only the identifier is passed. */
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FROM
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO)
&& (omp_is_allocatable_or_ptr (var)
&& omp_check_optional_argument (var, false)))
var = build_fold_indirect_ref (var);
else if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_FROM
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_TO)
|| (!omp_is_allocatable_or_ptr (var)
&& !omp_check_optional_argument (var, false)))
var = build_fold_addr_expr (var);
gimplify_assign (x, var, &ilist);
}
}
s = NULL_TREE;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
{
gcc_checking_assert (is_gimple_omp_oacc (ctx->stmt));
s = TREE_TYPE (ovar);
if (TREE_CODE (s) == REFERENCE_TYPE
|| omp_check_optional_argument (ovar, false))
s = TREE_TYPE (s);
s = TYPE_SIZE_UNIT (s);
}
else
s = OMP_CLAUSE_SIZE (c);
if (s == NULL_TREE)
s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
s = fold_convert (size_type_node, s);
purpose = size_int (map_idx++);
CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
if (TREE_CODE (s) != INTEGER_CST)
TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
unsigned HOST_WIDE_INT tkind, tkind_zero;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_MAP:
tkind = OMP_CLAUSE_MAP_KIND (c);
tkind_zero = tkind;
if (OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c))
switch (tkind)
{
case GOMP_MAP_ALLOC:
case GOMP_MAP_IF_PRESENT:
case GOMP_MAP_TO:
case GOMP_MAP_FROM:
case GOMP_MAP_TOFROM:
case GOMP_MAP_ALWAYS_TO:
case GOMP_MAP_ALWAYS_FROM:
case GOMP_MAP_ALWAYS_TOFROM:
case GOMP_MAP_RELEASE:
case GOMP_MAP_FORCE_TO:
case GOMP_MAP_FORCE_FROM:
case GOMP_MAP_FORCE_TOFROM:
case GOMP_MAP_FORCE_PRESENT:
tkind_zero = GOMP_MAP_ZERO_LEN_ARRAY_SECTION;
break;
case GOMP_MAP_DELETE:
tkind_zero = GOMP_MAP_DELETE_ZERO_LEN_ARRAY_SECTION;
/* FALLTHRU */
default:
break;
}
if (tkind_zero != tkind)
{
if (integer_zerop (s))
tkind = tkind_zero;
else if (integer_nonzerop (s))
tkind_zero = tkind;
}
break;
case OMP_CLAUSE_FIRSTPRIVATE:
gcc_checking_assert (is_gimple_omp_oacc (ctx->stmt));
tkind = GOMP_MAP_TO;
tkind_zero = tkind;
break;
case OMP_CLAUSE_TO:
tkind = GOMP_MAP_TO;
tkind_zero = tkind;
break;
case OMP_CLAUSE_FROM:
tkind = GOMP_MAP_FROM;
tkind_zero = tkind;
break;
default:
gcc_unreachable ();
}
gcc_checking_assert (tkind
< (HOST_WIDE_INT_C (1U) << talign_shift));
gcc_checking_assert (tkind_zero
< (HOST_WIDE_INT_C (1U) << talign_shift));
talign = ceil_log2 (talign);
tkind |= talign << talign_shift;
tkind_zero |= talign << talign_shift;
gcc_checking_assert (tkind
<= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
gcc_checking_assert (tkind_zero
<= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
if (tkind == tkind_zero)
x = build_int_cstu (tkind_type, tkind);
else
{
TREE_STATIC (TREE_VEC_ELT (t, 2)) = 0;
x = build3 (COND_EXPR, tkind_type,
fold_build2 (EQ_EXPR, boolean_type_node,
unshare_expr (s), size_zero_node),
build_int_cstu (tkind_type, tkind_zero),
build_int_cstu (tkind_type, tkind));
}
CONSTRUCTOR_APPEND_ELT (vkind, purpose, x);
if (nc && nc != c)
c = nc;
break;
case OMP_CLAUSE_FIRSTPRIVATE:
if (is_gimple_omp_oacc (ctx->stmt))
goto oacc_firstprivate_map;
ovar = OMP_CLAUSE_DECL (c);
if (omp_is_reference (ovar))
talign = TYPE_ALIGN_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
else
talign = DECL_ALIGN_UNIT (ovar);
var = lookup_decl_in_outer_ctx (ovar, ctx);
x = build_sender_ref (ovar, ctx);
tkind = GOMP_MAP_FIRSTPRIVATE;
type = TREE_TYPE (ovar);
if (omp_is_reference (ovar))
type = TREE_TYPE (type);
if ((INTEGRAL_TYPE_P (type)
&& TYPE_PRECISION (type) <= POINTER_SIZE)
|| TREE_CODE (type) == POINTER_TYPE)
{
tkind = GOMP_MAP_FIRSTPRIVATE_INT;
tree t = var;
if (omp_is_reference (var))
t = build_simple_mem_ref (var);
else if (OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c))
TREE_NO_WARNING (var) = 1;
if (TREE_CODE (type) != POINTER_TYPE)
t = fold_convert (pointer_sized_int_node, t);
t = fold_convert (TREE_TYPE (x), t);
gimplify_assign (x, t, &ilist);
}
else if (omp_is_reference (var))
gimplify_assign (x, var, &ilist);
else if (is_gimple_reg (var))
{
tree avar = create_tmp_var (TREE_TYPE (var));
mark_addressable (avar);
if (OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c))
TREE_NO_WARNING (var) = 1;
gimplify_assign (avar, var, &ilist);
avar = build_fold_addr_expr (avar);
gimplify_assign (x, avar, &ilist);
}
else
{
var = build_fold_addr_expr (var);
gimplify_assign (x, var, &ilist);
}
if (tkind == GOMP_MAP_FIRSTPRIVATE_INT)
s = size_int (0);
else if (omp_is_reference (ovar))
s = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
else
s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
s = fold_convert (size_type_node, s);
purpose = size_int (map_idx++);
CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
if (TREE_CODE (s) != INTEGER_CST)
TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
gcc_checking_assert (tkind
< (HOST_WIDE_INT_C (1U) << talign_shift));
talign = ceil_log2 (talign);
tkind |= talign << talign_shift;
gcc_checking_assert (tkind
<= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
CONSTRUCTOR_APPEND_ELT (vkind, purpose,
build_int_cstu (tkind_type, tkind));
break;
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_ADDR:
case OMP_CLAUSE_IS_DEVICE_PTR:
ovar = OMP_CLAUSE_DECL (c);
var = lookup_decl_in_outer_ctx (ovar, ctx);
if (lang_hooks.decls.omp_array_data (ovar, true))
{
tkind = (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_IS_DEVICE_PTR
? GOMP_MAP_USE_DEVICE_PTR : GOMP_MAP_FIRSTPRIVATE_INT);
x = build_sender_ref ((splay_tree_key) &DECL_NAME (ovar), ctx);
}
else if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_IS_DEVICE_PTR)
{
tkind = GOMP_MAP_USE_DEVICE_PTR;
x = build_sender_ref ((splay_tree_key) &DECL_UID (ovar), ctx);
}
else
{
tkind = GOMP_MAP_FIRSTPRIVATE_INT;
x = build_sender_ref (ovar, ctx);
}
if (is_gimple_omp_oacc (ctx->stmt))
{
gcc_assert (tkind == GOMP_MAP_USE_DEVICE_PTR);
if (OMP_CLAUSE_USE_DEVICE_PTR_IF_PRESENT (c))
tkind = GOMP_MAP_USE_DEVICE_PTR_IF_PRESENT;
}
type = TREE_TYPE (ovar);
if (lang_hooks.decls.omp_array_data (ovar, true))
var = lang_hooks.decls.omp_array_data (ovar, false);
else if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_ADDR
&& !omp_is_reference (ovar)
&& !omp_is_allocatable_or_ptr (ovar))
|| TREE_CODE (type) == ARRAY_TYPE)
var = build_fold_addr_expr (var);
else
{
if (omp_is_reference (ovar)
|| omp_check_optional_argument (ovar, false)
|| omp_is_allocatable_or_ptr (ovar))
{
type = TREE_TYPE (type);
if (TREE_CODE (type) != ARRAY_TYPE
&& ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_USE_DEVICE_ADDR
&& !omp_is_allocatable_or_ptr (ovar))
|| (omp_is_reference (ovar)
&& omp_is_allocatable_or_ptr (ovar))))
var = build_simple_mem_ref (var);
var = fold_convert (TREE_TYPE (x), var);
}
}
tree present;
present = omp_check_optional_argument (ovar, true);
if (present)
{
tree null_label = create_artificial_label (UNKNOWN_LOCATION);
tree notnull_label = create_artificial_label (UNKNOWN_LOCATION);
tree opt_arg_label = create_artificial_label (UNKNOWN_LOCATION);
tree new_x = unshare_expr (x);
gimplify_expr (&present, &ilist, NULL, is_gimple_val,
fb_rvalue);
gcond *cond = gimple_build_cond_from_tree (present,
notnull_label,
null_label);
gimple_seq_add_stmt (&ilist, cond);
gimple_seq_add_stmt (&ilist, gimple_build_label (null_label));
gimplify_assign (new_x, null_pointer_node, &ilist);
gimple_seq_add_stmt (&ilist, gimple_build_goto (opt_arg_label));
gimple_seq_add_stmt (&ilist,
gimple_build_label (notnull_label));
gimplify_assign (x, var, &ilist);
gimple_seq_add_stmt (&ilist,
gimple_build_label (opt_arg_label));
}
else
gimplify_assign (x, var, &ilist);
s = size_int (0);
purpose = size_int (map_idx++);
CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
gcc_checking_assert (tkind
< (HOST_WIDE_INT_C (1U) << talign_shift));
gcc_checking_assert (tkind
<= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
CONSTRUCTOR_APPEND_ELT (vkind, purpose,
build_int_cstu (tkind_type, tkind));
break;
}
gcc_assert (map_idx == map_cnt);
DECL_INITIAL (TREE_VEC_ELT (t, 1))
= build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
DECL_INITIAL (TREE_VEC_ELT (t, 2))
= build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
for (int i = 1; i <= 2; i++)
if (!TREE_STATIC (TREE_VEC_ELT (t, i)))
{
gimple_seq initlist = NULL;
force_gimple_operand (build1 (DECL_EXPR, void_type_node,
TREE_VEC_ELT (t, i)),
&initlist, true, NULL_TREE);
gimple_seq_add_seq (&ilist, initlist);
tree clobber = build_clobber (TREE_TYPE (TREE_VEC_ELT (t, i)));
gimple_seq_add_stmt (&olist,
gimple_build_assign (TREE_VEC_ELT (t, i),
clobber));
}
tree clobber = build_clobber (ctx->record_type);
gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
clobber));
}
/* Once all the expansions are done, sequence all the different
fragments inside gimple_omp_body. */
new_body = NULL;
if (offloaded
&& ctx->record_type)
{
t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
/* fixup_child_record_type might have changed receiver_decl's type. */
t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (ctx->receiver_decl, t));
}
gimple_seq_add_seq (&new_body, fplist);
if (offloaded || data_region)
{
tree prev = NULL_TREE;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
tree var, x;
default:
break;
case OMP_CLAUSE_FIRSTPRIVATE:
if (is_gimple_omp_oacc (ctx->stmt))
break;
var = OMP_CLAUSE_DECL (c);
if (omp_is_reference (var)
|| is_gimple_reg_type (TREE_TYPE (var)))
{
tree new_var = lookup_decl (var, ctx);
tree type;
type = TREE_TYPE (var);
if (omp_is_reference (var))
type = TREE_TYPE (type);
if ((INTEGRAL_TYPE_P (type)
&& TYPE_PRECISION (type) <= POINTER_SIZE)
|| TREE_CODE (type) == POINTER_TYPE)
{
x = build_receiver_ref (var, false, ctx);
if (TREE_CODE (type) != POINTER_TYPE)
x = fold_convert (pointer_sized_int_node, x);
x = fold_convert (type, x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val,
fb_rvalue);
if (omp_is_reference (var))
{
tree v = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (v);
TREE_ADDRESSABLE (v) = 1;
gimple_seq_add_stmt (&new_body,
gimple_build_assign (v, x));
x = build_fold_addr_expr (v);
}
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
else
{
x = build_receiver_ref (var, !omp_is_reference (var), ctx);
gimplify_expr (&x, &new_body, NULL, is_gimple_val,
fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
}
else if (is_variable_sized (var))
{
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_var = lookup_decl (pvar, ctx);
x = build_receiver_ref (var, false, ctx);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
break;
case OMP_CLAUSE_PRIVATE:
if (is_gimple_omp_oacc (ctx->stmt))
break;
var = OMP_CLAUSE_DECL (c);
if (omp_is_reference (var))
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
tree new_var = lookup_decl (var, ctx);
x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
if (TREE_CONSTANT (x))
{
x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
get_name (var));
gimple_add_tmp_var (x);
TREE_ADDRESSABLE (x) = 1;
x = build_fold_addr_expr_loc (clause_loc, x);
}
else
break;
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
break;
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_ADDR:
case OMP_CLAUSE_IS_DEVICE_PTR:
tree new_var;
gimple_seq assign_body;
bool is_array_data;
bool do_optional_check;
assign_body = NULL;
do_optional_check = false;
var = OMP_CLAUSE_DECL (c);
is_array_data = lang_hooks.decls.omp_array_data (var, true) != NULL;
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_IS_DEVICE_PTR)
x = build_sender_ref (is_array_data
? (splay_tree_key) &DECL_NAME (var)
: (splay_tree_key) &DECL_UID (var), ctx);
else
x = build_receiver_ref (var, false, ctx);
if (is_array_data)
{
bool is_ref = omp_is_reference (var);
do_optional_check = true;
/* First, we copy the descriptor data from the host; then
we update its data to point to the target address. */
new_var = lookup_decl (var, ctx);
new_var = DECL_VALUE_EXPR (new_var);
tree v = new_var;
if (is_ref)
{
var = build_fold_indirect_ref (var);
gimplify_expr (&var, &assign_body, NULL, is_gimple_val,
fb_rvalue);
v = create_tmp_var_raw (TREE_TYPE (var), get_name (var));
gimple_add_tmp_var (v);
TREE_ADDRESSABLE (v) = 1;
gimple_seq_add_stmt (&assign_body,
gimple_build_assign (v, var));
tree rhs = build_fold_addr_expr (v);
gimple_seq_add_stmt (&assign_body,
gimple_build_assign (new_var, rhs));
}
else
gimple_seq_add_stmt (&assign_body,
gimple_build_assign (new_var, var));
tree v2 = lang_hooks.decls.omp_array_data (unshare_expr (v), false);
gcc_assert (v2);
gimplify_expr (&x, &assign_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&assign_body,
gimple_build_assign (v2, x));
}
else if (is_variable_sized (var))
{
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
new_var = lookup_decl (pvar, ctx);
gimplify_expr (&x, &assign_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&assign_body,
gimple_build_assign (new_var, x));
}
else if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_ADDR
&& !omp_is_reference (var)
&& !omp_is_allocatable_or_ptr (var))
|| TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
{
new_var = lookup_decl (var, ctx);
new_var = DECL_VALUE_EXPR (new_var);
gcc_assert (TREE_CODE (new_var) == MEM_REF);
new_var = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_var));
gimplify_expr (&x, &assign_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&assign_body,
gimple_build_assign (new_var, x));
}
else
{
tree type = TREE_TYPE (var);
new_var = lookup_decl (var, ctx);
if (omp_is_reference (var))
{
type = TREE_TYPE (type);
if (TREE_CODE (type) != ARRAY_TYPE
&& (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_USE_DEVICE_ADDR
|| (omp_is_reference (var)
&& omp_is_allocatable_or_ptr (var))))
{
tree v = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (v);
TREE_ADDRESSABLE (v) = 1;
x = fold_convert (type, x);
gimplify_expr (&x, &assign_body, NULL, is_gimple_val,
fb_rvalue);
gimple_seq_add_stmt (&assign_body,
gimple_build_assign (v, x));
x = build_fold_addr_expr (v);
do_optional_check = true;
}
}
new_var = DECL_VALUE_EXPR (new_var);
x = fold_convert (TREE_TYPE (new_var), x);
gimplify_expr (&x, &assign_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&assign_body,
gimple_build_assign (new_var, x));
}
tree present;
present = (do_optional_check
? omp_check_optional_argument (OMP_CLAUSE_DECL (c), true)
: NULL_TREE);
if (present)
{
tree null_label = create_artificial_label (UNKNOWN_LOCATION);
tree notnull_label = create_artificial_label (UNKNOWN_LOCATION);
tree opt_arg_label = create_artificial_label (UNKNOWN_LOCATION);
glabel *null_glabel = gimple_build_label (null_label);
glabel *notnull_glabel = gimple_build_label (notnull_label);
ggoto *opt_arg_ggoto = gimple_build_goto (opt_arg_label);
gimplify_expr (&x, &new_body, NULL, is_gimple_val,
fb_rvalue);
gimplify_expr (&present, &new_body, NULL, is_gimple_val,
fb_rvalue);
gcond *cond = gimple_build_cond_from_tree (present,
notnull_label,
null_label);
gimple_seq_add_stmt (&new_body, cond);
gimple_seq_add_stmt (&new_body, null_glabel);
gimplify_assign (new_var, null_pointer_node, &new_body);
gimple_seq_add_stmt (&new_body, opt_arg_ggoto);
gimple_seq_add_stmt (&new_body, notnull_glabel);
gimple_seq_add_seq (&new_body, assign_body);
gimple_seq_add_stmt (&new_body,
gimple_build_label (opt_arg_label));
}
else
gimple_seq_add_seq (&new_body, assign_body);
break;
}
/* Handle GOMP_MAP_FIRSTPRIVATE_{POINTER,REFERENCE} in a second pass,
so that firstprivate vars holding OMP_CLAUSE_SIZE, if needed,
are already handled.  Similarly OMP_CLAUSE_PRIVATE for VLAs
or references to VLAs.  */
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
tree var;
default:
break;
case OMP_CLAUSE_MAP:
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
poly_int64 offset = 0;
gcc_assert (prev);
var = OMP_CLAUSE_DECL (c);
if (DECL_P (var)
&& TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
&& is_global_var (maybe_lookup_decl_in_outer_ctx (var,
ctx))
&& varpool_node::get_create (var)->offloadable)
break;
if (TREE_CODE (var) == INDIRECT_REF
&& TREE_CODE (TREE_OPERAND (var, 0)) == COMPONENT_REF)
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == COMPONENT_REF)
{
var = get_addr_base_and_unit_offset (var, &offset);
gcc_assert (var != NULL_TREE && DECL_P (var));
}
else if (DECL_SIZE (var)
&& TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
{
tree var2 = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
var2 = TREE_OPERAND (var2, 0);
gcc_assert (DECL_P (var2));
var = var2;
}
tree new_var = lookup_decl (var, ctx), x;
tree type = TREE_TYPE (new_var);
bool is_ref;
if (TREE_CODE (OMP_CLAUSE_DECL (c)) == INDIRECT_REF
&& (TREE_CODE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0))
== COMPONENT_REF))
{
type = TREE_TYPE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0));
is_ref = true;
new_var = build2 (MEM_REF, type,
build_fold_addr_expr (new_var),
build_int_cst (build_pointer_type (type),
offset));
}
else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPONENT_REF)
{
type = TREE_TYPE (OMP_CLAUSE_DECL (c));
is_ref = TREE_CODE (type) == REFERENCE_TYPE;
new_var = build2 (MEM_REF, type,
build_fold_addr_expr (new_var),
build_int_cst (build_pointer_type (type),
offset));
}
else
is_ref = omp_is_reference (var);
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
is_ref = false;
bool ref_to_array = false;
if (is_ref)
{
type = TREE_TYPE (type);
if (TREE_CODE (type) == ARRAY_TYPE)
{
type = build_pointer_type (type);
ref_to_array = true;
}
}
else if (TREE_CODE (type) == ARRAY_TYPE)
{
tree decl2 = DECL_VALUE_EXPR (new_var);
gcc_assert (TREE_CODE (decl2) == MEM_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
new_var = decl2;
type = TREE_TYPE (new_var);
}
x = build_receiver_ref (OMP_CLAUSE_DECL (prev), false, ctx);
x = fold_convert_loc (clause_loc, type, x);
if (!integer_zerop (OMP_CLAUSE_SIZE (c)))
{
tree bias = OMP_CLAUSE_SIZE (c);
if (DECL_P (bias))
bias = lookup_decl (bias, ctx);
bias = fold_convert_loc (clause_loc, sizetype, bias);
bias = fold_build1_loc (clause_loc, NEGATE_EXPR, sizetype,
bias);
x = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
TREE_TYPE (x), x, bias);
}
if (ref_to_array)
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
if (is_ref && !ref_to_array)
{
tree t = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (t);
TREE_ADDRESSABLE (t) = 1;
gimple_seq_add_stmt (&new_body,
gimple_build_assign (t, x));
x = build_fold_addr_expr_loc (clause_loc, t);
}
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
prev = NULL_TREE;
}
else if (OMP_CLAUSE_CHAIN (c)
&& OMP_CLAUSE_CODE (OMP_CLAUSE_CHAIN (c))
== OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c))
== GOMP_MAP_FIRSTPRIVATE_POINTER
|| (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c))
== GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
prev = c;
break;
case OMP_CLAUSE_PRIVATE:
var = OMP_CLAUSE_DECL (c);
if (is_variable_sized (var))
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
tree new_var = lookup_decl (var, ctx);
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_pvar = lookup_decl (pvar, ctx);
tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
tree al = size_int (DECL_ALIGN (var));
tree x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
x = fold_convert_loc (clause_loc, TREE_TYPE (new_pvar), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_pvar, x));
}
else if (omp_is_reference (var) && !is_gimple_omp_oacc (ctx->stmt))
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
tree new_var = lookup_decl (var, ctx);
tree x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
if (TREE_CONSTANT (x))
break;
else
{
tree atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
tree rtype = TREE_TYPE (TREE_TYPE (new_var));
tree al = size_int (TYPE_ALIGN (rtype));
x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
}
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
break;
}
gimple_seq fork_seq = NULL;
gimple_seq join_seq = NULL;
if (offloaded && is_gimple_omp_oacc (ctx->stmt))
{
/* If there are reductions on the offloaded region itself, treat
them as a dummy GANG loop. */
tree level = build_int_cst (integer_type_node, GOMP_DIM_GANG);
lower_oacc_reductions (gimple_location (ctx->stmt), clauses, level,
false, NULL, NULL, &fork_seq, &join_seq, ctx);
}
gimple_seq_add_seq (&new_body, fork_seq);
gimple_seq_add_seq (&new_body, tgt_body);
gimple_seq_add_seq (&new_body, join_seq);
if (offloaded)
{
new_body = maybe_catch_exception (new_body);
gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
}
gimple_omp_set_body (stmt, new_body);
}
bind = gimple_build_bind (NULL, NULL,
tgt_bind ? gimple_bind_block (tgt_bind)
: NULL_TREE);
gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
gimple_bind_add_seq (bind, ilist);
gimple_bind_add_stmt (bind, stmt);
gimple_bind_add_seq (bind, olist);
pop_gimplify_context (NULL);
if (dep_bind)
{
gimple_bind_add_seq (dep_bind, dep_ilist);
gimple_bind_add_stmt (dep_bind, bind);
gimple_bind_add_seq (dep_bind, dep_olist);
pop_gimplify_context (dep_bind);
}
}
/* Expand code for an OpenMP teams directive. */
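/* As a rough sketch (illustrative, not literal compiler output),

     #pragma omp teams num_teams (4) thread_limit (16)

   lowers to a call

     __builtin_GOMP_teams (4, 16);

   followed by the lowered teams body, with the privatization (dlist)
   and reduction (olist) sequences emitted around it.  */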
static void
lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
gomp_teams *teams_stmt = as_a <gomp_teams *> (gsi_stmt (*gsi_p));
push_gimplify_context ();
tree block = make_node (BLOCK);
gbind *bind = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, bind, true);
gimple_seq bind_body = NULL;
gimple_seq dlist = NULL;
gimple_seq olist = NULL;
tree num_teams = omp_find_clause (gimple_omp_teams_clauses (teams_stmt),
OMP_CLAUSE_NUM_TEAMS);
if (num_teams == NULL_TREE)
num_teams = build_int_cst (unsigned_type_node, 0);
else
{
num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
num_teams = fold_convert (unsigned_type_node, num_teams);
gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
}
tree thread_limit = omp_find_clause (gimple_omp_teams_clauses (teams_stmt),
OMP_CLAUSE_THREAD_LIMIT);
if (thread_limit == NULL_TREE)
thread_limit = build_int_cst (unsigned_type_node, 0);
else
{
thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
thread_limit = fold_convert (unsigned_type_node, thread_limit);
gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
fb_rvalue);
}
lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
&bind_body, &dlist, ctx, NULL);
lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist,
NULL, ctx);
if (!gimple_omp_teams_grid_phony (teams_stmt))
{
gimple_seq_add_stmt (&bind_body, teams_stmt);
location_t loc = gimple_location (teams_stmt);
tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
gimple *call = gimple_build_call (decl, 2, num_teams, thread_limit);
gimple_set_location (call, loc);
gimple_seq_add_stmt (&bind_body, call);
}
gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
gimple_omp_set_body (teams_stmt, NULL);
gimple_seq_add_seq (&bind_body, olist);
gimple_seq_add_seq (&bind_body, dlist);
if (!gimple_omp_teams_grid_phony (teams_stmt))
gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
gimple_bind_set_body (bind, bind_body);
pop_gimplify_context (bind);
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = ctx->block_vars;
if (BLOCK_VARS (block))
TREE_USED (block) = 1;
}
/* Expand code within an artificial GIMPLE_OMP_GRID_BODY OMP construct. */
static void
lower_omp_grid_body (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
gimple *stmt = gsi_stmt (*gsi_p);
lower_omp (gimple_omp_body_ptr (stmt), ctx);
gimple_seq_add_stmt (gimple_omp_body_ptr (stmt),
gimple_build_omp_return (false));
}
/* Callback for lower_omp_1. Return non-NULL if *tp needs to be
regimplified. If DATA is non-NULL, lower_omp_1 is outside
of OMP context, but with task_shared_vars set. */
static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
void *data)
{
tree t = *tp;
/* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
if (VAR_P (t) && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
return t;
if (task_shared_vars
&& DECL_P (t)
&& bitmap_bit_p (task_shared_vars, DECL_UID (t)))
return t;
/* If a global variable has been privatized, TREE_CONSTANT on
ADDR_EXPR might be wrong. */
if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
recompute_tree_invariant_for_addr_expr (t);
*walk_subtrees = !IS_TYPE_OR_DECL_P (t);
return NULL_TREE;
}
/* Data to be communicated between lower_omp_regimplify_operands and
lower_omp_regimplify_operands_p. */
struct lower_omp_regimplify_operands_data
{
omp_context *ctx;
vec<tree> *decls;
};
/* Helper function for lower_omp_regimplify_operands. Find
omp_member_access_dummy_var vars and adjust temporarily their
DECL_VALUE_EXPRs if needed. */
static tree
lower_omp_regimplify_operands_p (tree *tp, int *walk_subtrees,
void *data)
{
tree t = omp_member_access_dummy_var (*tp);
if (t)
{
struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
lower_omp_regimplify_operands_data *ldata
= (lower_omp_regimplify_operands_data *) wi->info;
tree o = maybe_lookup_decl (t, ldata->ctx);
if (o != t)
{
ldata->decls->safe_push (DECL_VALUE_EXPR (*tp));
ldata->decls->safe_push (*tp);
tree v = unshare_and_remap (DECL_VALUE_EXPR (*tp), t, o);
SET_DECL_VALUE_EXPR (*tp, v);
}
}
*walk_subtrees = !IS_TYPE_OR_DECL_P (*tp);
return NULL_TREE;
}
/* Wrapper around gimple_regimplify_operands that adjusts DECL_VALUE_EXPRs
of omp_member_access_dummy_var vars during regimplification. */
static void
lower_omp_regimplify_operands (omp_context *ctx, gimple *stmt,
gimple_stmt_iterator *gsi_p)
{
auto_vec<tree, 10> decls;
if (ctx)
{
struct walk_stmt_info wi;
memset (&wi, '\0', sizeof (wi));
struct lower_omp_regimplify_operands_data data;
data.ctx = ctx;
data.decls = &decls;
wi.info = &data;
walk_gimple_op (stmt, lower_omp_regimplify_operands_p, &wi);
}
gimple_regimplify_operands (stmt, gsi_p);
while (!decls.is_empty ())
{
tree t = decls.pop ();
tree v = decls.pop ();
SET_DECL_VALUE_EXPR (t, v);
}
}
static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
gimple *stmt = gsi_stmt (*gsi_p);
struct walk_stmt_info wi;
gcall *call_stmt;
if (gimple_has_location (stmt))
input_location = gimple_location (stmt);
if (task_shared_vars)
memset (&wi, '\0', sizeof (wi));
/* If we have issued syntax errors, avoid doing any heavy lifting.
Just replace the OMP directives with a NOP to avoid
confusing RTL expansion. */
if (seen_error () && is_gimple_omp (stmt))
{
gsi_replace (gsi_p, gimple_build_nop (), true);
return;
}
switch (gimple_code (stmt))
{
case GIMPLE_COND:
{
gcond *cond_stmt = as_a <gcond *> (stmt);
if ((ctx || task_shared_vars)
&& (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
lower_omp_regimplify_p,
ctx ? NULL : &wi, NULL)
|| walk_tree (gimple_cond_rhs_ptr (cond_stmt),
lower_omp_regimplify_p,
ctx ? NULL : &wi, NULL)))
lower_omp_regimplify_operands (ctx, cond_stmt, gsi_p);
}
break;
case GIMPLE_CATCH:
lower_omp (gimple_catch_handler_ptr (as_a <gcatch *> (stmt)), ctx);
break;
case GIMPLE_EH_FILTER:
lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
break;
case GIMPLE_TRY:
lower_omp (gimple_try_eval_ptr (stmt), ctx);
lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
break;
case GIMPLE_TRANSACTION:
lower_omp (gimple_transaction_body_ptr (as_a <gtransaction *> (stmt)),
ctx);
break;
case GIMPLE_BIND:
lower_omp (gimple_bind_body_ptr (as_a <gbind *> (stmt)), ctx);
maybe_remove_omp_member_access_dummy_vars (as_a <gbind *> (stmt));
break;
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
if (ctx->cancellable)
ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
lower_omp_taskreg (gsi_p, ctx);
break;
case GIMPLE_OMP_FOR:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
if (ctx->cancellable)
ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
lower_omp_for (gsi_p, ctx);
break;
case GIMPLE_OMP_SECTIONS:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
if (ctx->cancellable)
ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
lower_omp_sections (gsi_p, ctx);
break;
case GIMPLE_OMP_SINGLE:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
lower_omp_single (gsi_p, ctx);
break;
case GIMPLE_OMP_MASTER:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
lower_omp_master (gsi_p, ctx);
break;
case GIMPLE_OMP_TASKGROUP:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
lower_omp_taskgroup (gsi_p, ctx);
break;
case GIMPLE_OMP_ORDERED:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
lower_omp_ordered (gsi_p, ctx);
break;
case GIMPLE_OMP_SCAN:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
lower_omp_scan (gsi_p, ctx);
break;
case GIMPLE_OMP_CRITICAL:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
lower_omp_critical (gsi_p, ctx);
break;
case GIMPLE_OMP_ATOMIC_LOAD:
if ((ctx || task_shared_vars)
&& walk_tree (gimple_omp_atomic_load_rhs_ptr (
as_a <gomp_atomic_load *> (stmt)),
lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
lower_omp_regimplify_operands (ctx, stmt, gsi_p);
break;
case GIMPLE_OMP_TARGET:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
lower_omp_target (gsi_p, ctx);
break;
case GIMPLE_OMP_TEAMS:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
if (gimple_omp_teams_host (as_a <gomp_teams *> (stmt)))
lower_omp_taskreg (gsi_p, ctx);
else
lower_omp_teams (gsi_p, ctx);
break;
case GIMPLE_OMP_GRID_BODY:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
lower_omp_grid_body (gsi_p, ctx);
break;
case GIMPLE_CALL:
tree fndecl;
call_stmt = as_a <gcall *> (stmt);
fndecl = gimple_call_fndecl (call_stmt);
if (fndecl
&& fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
switch (DECL_FUNCTION_CODE (fndecl))
{
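/* Descriptive note (added): inside a cancellable construct,
   GOMP_barrier is rewritten to GOMP_barrier_cancel, and the boolean
   result of the GOMP_cancel / GOMP_cancellation_point / barrier call
   is tested, branching to ctx->cancel_label when cancellation has
   been observed.  */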
case BUILT_IN_GOMP_BARRIER:
if (ctx == NULL)
break;
/* FALLTHRU */
case BUILT_IN_GOMP_CANCEL:
case BUILT_IN_GOMP_CANCELLATION_POINT:
omp_context *cctx;
cctx = ctx;
if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
cctx = cctx->outer;
gcc_assert (gimple_call_lhs (call_stmt) == NULL_TREE);
if (!cctx->cancellable)
{
if (DECL_FUNCTION_CODE (fndecl)
== BUILT_IN_GOMP_CANCELLATION_POINT)
{
stmt = gimple_build_nop ();
gsi_replace (gsi_p, stmt, false);
}
break;
}
if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
{
fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
gimple_call_set_fndecl (call_stmt, fndecl);
gimple_call_set_fntype (call_stmt, TREE_TYPE (fndecl));
}
tree lhs;
lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)));
gimple_call_set_lhs (call_stmt, lhs);
tree fallthru_label;
fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
gimple *g;
g = gimple_build_label (fallthru_label);
gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
g = gimple_build_cond (NE_EXPR, lhs,
fold_convert (TREE_TYPE (lhs),
boolean_false_node),
cctx->cancel_label, fallthru_label);
gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
break;
default:
break;
}
goto regimplify;
case GIMPLE_ASSIGN:
for (omp_context *up = ctx; up; up = up->outer)
{
if (gimple_code (up->stmt) == GIMPLE_OMP_ORDERED
|| gimple_code (up->stmt) == GIMPLE_OMP_CRITICAL
|| gimple_code (up->stmt) == GIMPLE_OMP_TASKGROUP
|| gimple_code (up->stmt) == GIMPLE_OMP_SECTION
|| gimple_code (up->stmt) == GIMPLE_OMP_SCAN
|| (gimple_code (up->stmt) == GIMPLE_OMP_TARGET
&& (gimple_omp_target_kind (up->stmt)
== GF_OMP_TARGET_KIND_DATA)))
continue;
else if (!up->lastprivate_conditional_map)
break;
tree lhs = get_base_address (gimple_assign_lhs (stmt));
if (TREE_CODE (lhs) == MEM_REF
&& DECL_P (TREE_OPERAND (lhs, 0))
&& TREE_CODE (TREE_TYPE (TREE_OPERAND (lhs,
0))) == REFERENCE_TYPE)
lhs = TREE_OPERAND (lhs, 0);
if (DECL_P (lhs))
if (tree *v = up->lastprivate_conditional_map->get (lhs))
{
tree clauses;
if (up->combined_into_simd_safelen1)
{
up = up->outer;
if (gimple_code (up->stmt) == GIMPLE_OMP_SCAN)
up = up->outer;
}
if (gimple_code (up->stmt) == GIMPLE_OMP_FOR)
clauses = gimple_omp_for_clauses (up->stmt);
else
clauses = gimple_omp_sections_clauses (up->stmt);
tree c = omp_find_clause (clauses, OMP_CLAUSE__CONDTEMP_);
if (!OMP_CLAUSE__CONDTEMP__ITER (c))
c = omp_find_clause (OMP_CLAUSE_CHAIN (c),
OMP_CLAUSE__CONDTEMP_);
gcc_assert (OMP_CLAUSE__CONDTEMP__ITER (c));
gimple *g = gimple_build_assign (*v, OMP_CLAUSE_DECL (c));
gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
}
}
/* FALLTHRU */
default:
regimplify:
if ((ctx || task_shared_vars)
&& walk_gimple_op (stmt, lower_omp_regimplify_p,
ctx ? NULL : &wi))
{
/* Just remove clobbers; this should happen only if we have
"privatized" local addressable variables in SIMD regions.
The clobber isn't needed in that case, and gimplifying the address
of the ARRAY_REF into a pointer and creating a MEM_REF based
clobber would create worse code than we get with the clobber
dropped.  */
if (gimple_clobber_p (stmt))
{
gsi_replace (gsi_p, gimple_build_nop (), true);
break;
}
lower_omp_regimplify_operands (ctx, stmt, gsi_p);
}
break;
}
}
static void
lower_omp (gimple_seq *body, omp_context *ctx)
{
location_t saved_location = input_location;
gimple_stmt_iterator gsi;
for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
lower_omp_1 (&gsi, ctx);
/* During gimplification, we haven't folded statements inside offloading
or taskreg regions (gimplify.c:maybe_fold_stmt); do that now.  */
if (target_nesting_level || taskreg_nesting_level)
for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
fold_stmt (&gsi);
input_location = saved_location;
}
/* Main entry point. */
static unsigned int
execute_lower_omp (void)
{
gimple_seq body;
int i;
omp_context *ctx;
/* This pass always runs, to provide PROP_gimple_lomp.
But often, there is nothing to do. */
if (flag_openacc == 0 && flag_openmp == 0
&& flag_openmp_simd == 0)
return 0;
all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
delete_omp_context);
body = gimple_body (current_function_decl);
if (hsa_gen_requested_p ())
omp_grid_gridify_all_targets (&body);
scan_omp (&body, NULL);
gcc_assert (taskreg_nesting_level == 0);
FOR_EACH_VEC_ELT (taskreg_contexts, i, ctx)
finish_taskreg_scan (ctx);
taskreg_contexts.release ();
if (all_contexts->root)
{
if (task_shared_vars)
push_gimplify_context ();
lower_omp (&body, NULL);
if (task_shared_vars)
pop_gimplify_context (NULL);
}
if (all_contexts)
{
splay_tree_delete (all_contexts);
all_contexts = NULL;
}
BITMAP_FREE (task_shared_vars);
BITMAP_FREE (global_nonaddressable_vars);
/* If the current function is a method, remove the artificial dummy VAR_DECLs
created for non-static data member privatization; they aren't needed for
debuginfo or anything else, have already been replaced everywhere in the
IL, and cause problems with LTO.  */
if (DECL_ARGUMENTS (current_function_decl)
&& DECL_ARTIFICIAL (DECL_ARGUMENTS (current_function_decl))
&& (TREE_CODE (TREE_TYPE (DECL_ARGUMENTS (current_function_decl)))
== POINTER_TYPE))
remove_member_access_dummy_vars (DECL_INITIAL (current_function_decl));
return 0;
}
namespace {
const pass_data pass_data_lower_omp =
{
GIMPLE_PASS, /* type */
"omplower", /* name */
OPTGROUP_OMP, /* optinfo_flags */
TV_NONE, /* tv_id */
PROP_gimple_any, /* properties_required */
PROP_gimple_lomp | PROP_gimple_lomp_dev, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_lower_omp : public gimple_opt_pass
{
public:
pass_lower_omp (gcc::context *ctxt)
: gimple_opt_pass (pass_data_lower_omp, ctxt)
{}
/* opt_pass methods: */
virtual unsigned int execute (function *) { return execute_lower_omp (); }
}; // class pass_lower_omp
} // anon namespace
gimple_opt_pass *
make_pass_lower_omp (gcc::context *ctxt)
{
return new pass_lower_omp (ctxt);
}
/* The following is a utility to diagnose structured block violations.
It is not part of the "omplower" pass, as that's invoked too late. It
should be invoked by the respective front ends after gimplification. */
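/* For example (illustrative), jumping out of an OpenMP structured block:

     #pragma omp parallel
     {
       goto l;
     }
     l:;

   is diagnosed below as "invalid branch to/from OpenMP structured block",
   since the goto's context (the parallel) differs from the label's
   context (none).  */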
static splay_tree all_labels;
/* Check for mismatched contexts and generate an error if needed. Return
true if an error is detected. */
static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
gimple *branch_ctx, gimple *label_ctx)
{
gcc_checking_assert (!branch_ctx || is_gimple_omp (branch_ctx));
gcc_checking_assert (!label_ctx || is_gimple_omp (label_ctx));
if (label_ctx == branch_ctx)
return false;
const char* kind = NULL;
if (flag_openacc)
{
if ((branch_ctx && is_gimple_omp_oacc (branch_ctx))
|| (label_ctx && is_gimple_omp_oacc (label_ctx)))
{
gcc_checking_assert (kind == NULL);
kind = "OpenACC";
}
}
if (kind == NULL)
{
gcc_checking_assert (flag_openmp || flag_openmp_simd);
kind = "OpenMP";
}
/* Previously we kept track of the label's entire context in diagnose_sb_[12]
so we could traverse it and issue a correct "exit" or "enter" error
message upon a structured block violation.
We built the context by building a list with tree_cons'ing, but there is
no easy counterpart in gimple tuples. It seems like far too much work
for issuing exit/enter error messages. If someone really misses the
distinct error message... patches welcome. */
#if 0
/* Try to avoid confusing the user by producing an error message
with the correct "exit" or "enter" verbiage.  We prefer "exit"
unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
if (branch_ctx == NULL)
exit_p = false;
else
{
while (label_ctx)
{
if (TREE_VALUE (label_ctx) == branch_ctx)
{
exit_p = false;
break;
}
label_ctx = TREE_CHAIN (label_ctx);
}
}
if (exit_p)
error ("invalid exit from %s structured block", kind);
else
error ("invalid entry to %s structured block", kind);
#endif
/* If it's obvious we have an invalid entry, be specific about the error. */
if (branch_ctx == NULL)
error ("invalid entry to %s structured block", kind);
else
{
/* Otherwise, be vague and lazy, but efficient. */
error ("invalid branch to/from %s structured block", kind);
}
gsi_replace (gsi_p, gimple_build_nop (), false);
return true;
}
/* Pass 1: Create a minimal tree of structured blocks, and record
where each label is found. */
static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
struct walk_stmt_info *wi)
{
gimple *context = (gimple *) wi->info;
gimple *inner_context;
gimple *stmt = gsi_stmt (*gsi_p);
*handled_ops_p = true;
switch (gimple_code (stmt))
{
WALK_SUBSTMTS;
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_SCAN:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_TARGET:
case GIMPLE_OMP_TEAMS:
case GIMPLE_OMP_TASKGROUP:
/* The minimal context here is just the current OMP construct. */
inner_context = stmt;
wi->info = inner_context;
walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
wi->info = context;
break;
case GIMPLE_OMP_FOR:
inner_context = stmt;
wi->info = inner_context;
/* gimple_omp_for_{index,initial,final} are all DECLs; no need to
walk them. */
walk_gimple_seq (gimple_omp_for_pre_body (stmt),
diagnose_sb_1, NULL, wi);
walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
wi->info = context;
break;
case GIMPLE_LABEL:
splay_tree_insert (all_labels,
(splay_tree_key) gimple_label_label (
as_a <glabel *> (stmt)),
(splay_tree_value) context);
break;
default:
break;
}
return NULL_TREE;
}
/* Pass 2: Check each branch and see if its context differs from that of
the destination label's context. */
static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
struct walk_stmt_info *wi)
{
gimple *context = (gimple *) wi->info;
splay_tree_node n;
gimple *stmt = gsi_stmt (*gsi_p);
*handled_ops_p = true;
switch (gimple_code (stmt))
{
WALK_SUBSTMTS;
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_SCAN:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_TARGET:
case GIMPLE_OMP_TEAMS:
case GIMPLE_OMP_TASKGROUP:
wi->info = stmt;
walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
wi->info = context;
break;
case GIMPLE_OMP_FOR:
wi->info = stmt;
/* gimple_omp_for_{index,initial,final} are all DECLs; no need to
walk them. */
walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
diagnose_sb_2, NULL, wi);
walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
wi->info = context;
break;
case GIMPLE_COND:
{
gcond *cond_stmt = as_a <gcond *> (stmt);
tree lab = gimple_cond_true_label (cond_stmt);
if (lab)
{
n = splay_tree_lookup (all_labels,
(splay_tree_key) lab);
diagnose_sb_0 (gsi_p, context,
n ? (gimple *) n->value : NULL);
}
lab = gimple_cond_false_label (cond_stmt);
if (lab)
{
n = splay_tree_lookup (all_labels,
(splay_tree_key) lab);
diagnose_sb_0 (gsi_p, context,
n ? (gimple *) n->value : NULL);
}
}
break;
case GIMPLE_GOTO:
{
tree lab = gimple_goto_dest (stmt);
if (TREE_CODE (lab) != LABEL_DECL)
break;
n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
diagnose_sb_0 (gsi_p, context, n ? (gimple *) n->value : NULL);
}
break;
case GIMPLE_SWITCH:
{
gswitch *switch_stmt = as_a <gswitch *> (stmt);
unsigned int i;
for (i = 0; i < gimple_switch_num_labels (switch_stmt); ++i)
{
tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
if (n && diagnose_sb_0 (gsi_p, context, (gimple *) n->value))
break;
}
}
break;
case GIMPLE_RETURN:
diagnose_sb_0 (gsi_p, context, NULL);
break;
default:
break;
}
return NULL_TREE;
}
static unsigned int
diagnose_omp_structured_block_errors (void)
{
struct walk_stmt_info wi;
gimple_seq body = gimple_body (current_function_decl);
all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
memset (&wi, 0, sizeof (wi));
walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
memset (&wi, 0, sizeof (wi));
wi.want_locations = true;
walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
gimple_set_body (current_function_decl, body);
splay_tree_delete (all_labels);
all_labels = NULL;
return 0;
}
namespace {
const pass_data pass_data_diagnose_omp_blocks =
{
GIMPLE_PASS, /* type */
"*diagnose_omp_blocks", /* name */
OPTGROUP_OMP, /* optinfo_flags */
TV_NONE, /* tv_id */
PROP_gimple_any, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_diagnose_omp_blocks : public gimple_opt_pass
{
public:
pass_diagnose_omp_blocks (gcc::context *ctxt)
: gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
{}
/* opt_pass methods: */
virtual bool gate (function *)
{
return flag_openacc || flag_openmp || flag_openmp_simd;
}
virtual unsigned int execute (function *)
{
return diagnose_omp_structured_block_errors ();
}
}; // class pass_diagnose_omp_blocks
} // anon namespace
gimple_opt_pass *
make_pass_diagnose_omp_blocks (gcc::context *ctxt)
{
return new pass_diagnose_omp_blocks (ctxt);
}
#include "gt-omp-low.h"
|
hsrp_fmt_plug.c | /*
* Cracker for MD5 authentication in HSRP, HSRPv2, VRRP, and GLBP.
* http://www.rfc-editor.org/rfc/rfc1828.txt
*
* This is dedicated to Darya. You inspire me.
*
* This software is Copyright (c) 2014, Dhiru Kholia <dhiru [at] openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* optimized Feb 2016, JimF.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_hsrp;
#elif FMT_REGISTERS_H
john_register_one(&fmt_hsrp);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
// OMP_SCALE tuned on core i7 4-core HT
// 2048 - 8850k 6679k
// 4096 - 10642k 7278k
// 8192 - 10489k 7532k
// 16k - 10413k 7694k
// 32k - 12111k 7803k ** this value chosen
// 64k - 12420k 6523k
// 128k - 12220k 6741k
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 8192
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 32768
#endif
#endif
#endif
#include "arch.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#define FORMAT_LABEL "hsrp"
#define FORMAT_NAME "\"MD5 authentication\" HSRP, HSRPv2, VRRP, GLBP"
#define FORMAT_TAG "$hsrp$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 55 // Must fit in a single MD5 block
#define BINARY_SIZE 16
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_SIZE sizeof(struct custom_salt)
#define REAL_SALT_SIZE 50
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests tests[] = {
{"$hsrp$000004030a64010000000000000000000a000064041c010000000a0000140000000000000000000000000000000000000000$52e1db09d18d695b8fefb3730ff8d9d6", "password12345"},
{"$hsrp$000004030a5a01000000000000000000ac102801041c01000000ac1028140000000000000000000000000000000000000000$f15dfa631a0679e0801f8e6b0c0c17ac", "openwall"},
{"$hsrp$000010030a64010000000000000000000a000064041c010000000a0000140000000000000000000000000000000000000000$f02fc41b1b516e2d1261d8800d39ccea", "openwall12345"},
/* HSRPv2 hashes */
{"$hsrp$0128020006040001aabbcc000a000000006400000bb8000027100a000064000000000000000000000000041c010000000a00000a0000000000000000000000000000000000000000$642fedafe1f374bd2fdd8f1ba81d87a2", "password"},
{"$hsrp$0128020006040001aabbcc001400000000c800000bb8000027100a000064000000000000000000000000041c010000000a0000140000000000000000000000000000000000000000$0481257f0fe583b275f03a48e88de72f", "password12345"},
{NULL}
};
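/*
 * Input line layout, as parsed by valid()/get_salt()/get_binary() below
 * (a description of this code, not of any external spec):
 *
 *   $hsrp$<hex-encoded protocol data>$<32 hex chars of MD5 digest>
 *
 * Everything between the tag and the final '$' becomes the "salt";
 * the 16 bytes after the final '$' are the binary digest to match.
 */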
static char (*saved_key)[64]; // one full 64-byte MD5 block; we do our work in this buffer.
static MD5_CTX (*saved_ctx);
static int *saved_len, dirty;
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static struct custom_salt {
int length;
unsigned char salt[2048]; // be safe ;)
} *cur_salt;
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
saved_ctx = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_ctx));
}
static void done(void)
{
MEM_FREE(saved_ctx);
MEM_FREE(crypt_out);
MEM_FREE(saved_len);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q = NULL;
int len;
p = ciphertext;
if (strncmp(p, FORMAT_TAG, TAG_LENGTH))
return 0;
p += TAG_LENGTH;
q = strrchr(ciphertext, '$');
if (!q || q+1==p)
return 0;
q = q + 1;
// if ((q - p - 1) > REAL_SALT_SIZE * 2)
// return 0;
len = strspn(q, HEXCHARS_lc);
if (len != BINARY_SIZE * 2 || len != strlen(q))
return 0;
if (strspn(p, HEXCHARS_lc) != q - p - 1)
return 0;
if (q-p > (sizeof(cur_salt->salt)-1)*2)
return 0;
return 1;
}
static void *get_salt(char *ciphertext)
{
static struct custom_salt cs;
int i, len;
memset(&cs, 0, SALT_SIZE);
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
ciphertext += TAG_LENGTH;
len = (strrchr(ciphertext, '$') - ciphertext) / 2;
for (i = 0; i < len; i++)
cs.salt[i] = (atoi16[ARCH_INDEX(ciphertext[2 * i])] << 4) |
atoi16[ARCH_INDEX(ciphertext[2 * i + 1])];
cs.length = len;
return &cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
p = strrchr(ciphertext, '$') + 1;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
#define PUTCHAR(buf, index, val) ((unsigned char*)(buf))[index] = (val)
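/*
 * Note on the caching below: the key occupies the first 64-byte MD5 block,
 * padded as if it were a standalone MD5 message (0x80 terminator, bit
 * length in word 14 of the block). Since that block depends only on the
 * key, its MD5 state is cached in saved_ctx[] and recomputed only when
 * set_key() has marked the keys dirty; each crypt then just appends the
 * salt data and the key again.
 */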
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
MD5_CTX ctx;
int len = saved_len[index];
if (dirty) {
// we use the saved_key buffer in-line.
unsigned int *block = (unsigned int*)saved_key[index];
MD5_Init(&saved_ctx[index]);
// set the MD5 padding bit (0x80) right after the key
saved_key[index][len] = 0x80;
block[14] = len << 3;
#if (ARCH_LITTLE_ENDIAN==0)
block[14] = JOHNSWAP(block[14]);
#endif
MD5_Update(&saved_ctx[index], (unsigned char*)block, 64);
// clear the bit so that get_key() returns the proper key.
saved_key[index][len] = 0;
}
memcpy(&ctx, &saved_ctx[index], sizeof(MD5_CTX));
// data
MD5_Update(&ctx, cur_salt->salt, cur_salt->length);
// key (again)
MD5_Update(&ctx, saved_key[index], len);
MD5_Final((unsigned char*)crypt_out[index], &ctx);
}
dirty = 0;
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (((uint32_t*)binary)[0] == crypt_out[index][0])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void hsrp_set_key(char *key, int index)
{
int olen = saved_len[index];
int len = strlen(key);
saved_len[index] = len;
strcpy(saved_key[index], key);
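// The full 64-byte buffer is hashed as the first MD5 block, so any
// bytes left over from a longer previous key must be zeroed.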
if (olen > len)
memset(&(saved_key[index][len]), 0, olen-len);
dirty = 1;
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_hsrp = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{ NULL },
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
hsrp_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif
|
tinyexr.h | /*
Copyright (c) 2014 - 2017, Syoyo Fujita
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
#ifndef TINYEXR_H_
#define TINYEXR_H_
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // assume stdint.h is available (C99)
#ifdef __cplusplus
extern "C" {
#endif
// Use the embedded miniz to decode ZIP-compressed pixel data. Linking with
// zlib is required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-5) // NOTE: same value as TINYEXR_ERROR_INVALID_FILE
#define TINYEXR_ERROR_CANT_OPEN_FILE (-6)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-7)
#define TINYEXR_ERROR_INVALID_HEADER (-8)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
typedef struct _EXRVersion {
int version; // this must be 2
int tiled; // tile format image
int long_name; // long name attribute
int non_image; // deep image(EXR 2.0)
int multipart; // multi-part(EXR 2.0)
} EXRVersion;
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t*
int size;
int pad0;
} EXRAttribute;
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} EXRChannelInfo;
typedef struct _EXRTile {
int offset_x;
int offset_y;
int level_x;
int level_y;
int width; // actual width in a tile.
int height; // actual height in a tile.
unsigned char **images; // image[channels][pixels]
} EXRTile;
typedef struct _EXRHeader {
float pixel_aspect_ratio;
int line_order;
int data_window[4];
int display_window[4];
float screen_window_center[2];
float screen_window_width;
int chunk_count;
// Properties for tiled format(`tiledesc`).
int tiled;
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
int long_name;
int non_image;
int multipart;
unsigned int header_len;
// Custom attributes (excludes required attributes, e.g. `channels`,
// `compression`, etc.)
int num_custom_attributes;
EXRAttribute custom_attributes[TINYEXR_MAX_ATTRIBUTES];
EXRChannelInfo *channels; // [num_channels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel. This is overwritten with `requested_pixel_types` when
// loading.
int num_channels;
int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int *requested_pixel_types; // Filled initially by
// ParseEXRHeaderFrom(Memory|File), then users
// can edit it(only valid for HALF pixel type
// channel)
} EXRHeader;
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers;
} EXRMultiPartHeader;
typedef struct _EXRImage {
EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
// from tiles manually. NULL if scanline format.
unsigned char **images; // image[channels][pixels]. NULL if tiled format.
int width;
int height;
int num_channels;
// Properties for tile format.
int num_tiles;
} EXRImage;
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images;
} EXRMultiPartImage;
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0;
} DeepImage;
// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assumes the EXR image contains A
// (single-channel alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`.
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
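// A minimal usage sketch (illustrative only; "input.exr" is a placeholder
// path and error handling is trimmed):
//
//   float *rgba; int w, h; const char *err = NULL;
//   if (LoadEXR(&rgba, &w, &h, "input.exr", &err) == TINYEXR_SUCCESS) {
//     /* rgba holds w * h RGBA float pixels; the application frees it. */
//     free(rgba);
//   } else if (err) {
//     fprintf(stderr, "EXR load error: %s\n", err);
//   }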
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assumes the EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16 (HALF) format when `save_as_fp16` is a positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename);
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from memory and initialize `EXRHeader`.
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from memory and initialize `EXRHeader*`
// array.
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// Application must set up `EXRHeader` with `ParseEXRHeaderFromFile` before
// calling this function.
// Application can free EXRImage using `FreeEXRImage`.
// Returns negative value and may set error string in `err` when there's an
// error.
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
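// A sketch of the full header-based loading path (single-part, non-tiled;
// "input.exr" is a hypothetical filename):
//
//   EXRVersion version;
//   if (ParseEXRVersionFromFile(&version, "input.exr") != 0) { /* error */ }
//   EXRHeader header;
//   InitEXRHeader(&header);
//   const char *err = NULL;
//   if (ParseEXRHeaderFromFile(&header, &version, "input.exr", &err) < 0) {
//     if (err) fprintf(stderr, "%s\n", err);
//   }
//   // Optionally promote HALF channels to FLOAT before loading (see
//   // `requested_pixel_types` in EXRHeader above):
//   for (int i = 0; i < header.num_channels; i++) {
//     if (header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF)
//       header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
//   }
//   EXRImage image;
//   InitEXRImage(&image);
//   if (LoadEXRImageFromFile(&image, &header, "input.exr", &err) < 0) {
//     if (err) fprintf(stderr, "%s\n", err);
//   }
//   // ... use image.images[c] (or image.tiles for tiled files) ...
//   FreeEXRImage(&image);
//   FreeEXRHeader(&header);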
// Loads single-part OpenEXR image from memory.
// Application must set up `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`.
// Returns negative value and may set error string in `err` when there's an
// error.
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// Application must set up the `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`.
// Returns negative value and may set error string in `err` when there's an
// error.
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from memory.
// Application must set up the `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`.
// Returns negative value and may set error string in `err` when there's an
// error.
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
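// A sketch of the multi-part loading path (hypothetical "multi.exr"; the
// cleanup assumes the parser heap-allocates each header and the header array,
// as the `EXRHeader ***` signature suggests):
//
//   EXRVersion version;
//   ParseEXRVersionFromFile(&version, "multi.exr");
//   EXRHeader **headers = NULL;
//   int num_headers = 0;
//   const char *err = NULL;
//   ParseEXRMultipartHeaderFromFile(&headers, &num_headers, &version,
//                                   "multi.exr", &err);
//   EXRImage *images = (EXRImage *)malloc(sizeof(EXRImage) * num_headers);
//   for (int i = 0; i < num_headers; i++) InitEXRImage(&images[i]);
//   if (LoadEXRMultipartImageFromFile(images, (const EXRHeader **)headers,
//                                     (unsigned int)num_headers, "multi.exr",
//                                     &err) < 0) { /* handle error */ }
//   for (int i = 0; i < num_headers; i++) {
//     FreeEXRImage(&images[i]);
//     FreeEXRHeader(headers[i]);
//     free(headers[i]);  // header struct heap-allocated by the parser
//   }
//   free(images);
//   free(headers);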
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to memory.
// Image is compressed using the EXRHeader.compression_type value.
// Returns the number of bytes written on success.
// Returns 0 (the return type is unsigned, so it cannot be negative) and may
// set error string in `err` when there's an error.
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
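// A minimal sketch (assumes `image`/`header` were set up as for
// SaveEXRImageToFile; the output block is heap-allocated and, by assumption
// here, released with free()):
//
//   unsigned char *mem = NULL;
//   const char *err = NULL;
//   size_t bytes = SaveEXRImageToMemory(&image, &header, &mem, &err);
//   if (bytes == 0) {
//     if (err) fprintf(stderr, "%s\n", err);
//   } else {
//     // ... write `bytes` bytes from `mem` somewhere ...
//     free(mem);
//   }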
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage (image,
// offset_table).
// Returns negative value and may set error string in `err` when there's an
// error.
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
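// A minimal sketch (hypothetical "deep.exr"; sample indexing follows the
// DeepImage layout documented above):
//
//   DeepImage deep;
//   const char *err = NULL;
//   if (LoadDeepEXR(&deep, "deep.exr", &err) < 0) { /* handle error */ }
//   // deep.image[c][y][s] = sample s of channel c on scanline y;
//   // the application frees deep.image and deep.offset_table when done.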
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assumes the EXR image
// contains RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
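// A minimal sketch (assumes `data`/`data_len` hold a whole EXR file, e.g. one
// fetched over the network in an emscripten build):
//
//   float *rgba = NULL;
//   int w, h;
//   const char *err = NULL;
//   if (LoadEXRFromMemory(&rgba, &w, &h, data, data_len, &err) < 0) {
//     if (err) fprintf(stderr, "%s\n", err);
//   }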
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
#include <string>
#include <vector>
#if __cplusplus > 199711L
// C++11
#include <cstdint>
#endif // __cplusplus > 199711L
#ifdef _OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#include "zfp.h"
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type before C++11, assume it is
// available as a compiler extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
#if TINYEXR_USE_MINIZ
namespace miniz {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif
#endif
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks kahmyong.moon@hp.com) which could cause locate operations to not
find files. This bug
would only have occurred in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repo.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
failed
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <bruced@valvesoftware.com> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a Core i7 (actual throughput varies
depending on the type of data, and x64 vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's were designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
- Archive file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
const char *pArchive_name,
const void *pBuf, size_t buf_size, const void *pComment, mz_uint16
comment_size, mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to
preserve into a new archive using the
mz_zip_writer_add_from_zip_reader() function (which copies the
compressed file data as-is). When you're done, delete the old archive and
rename the newly written archive.
This is safe but requires a bunch of temporary disk space or
heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
//#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive
// API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc
// on Linux
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_FREE macro) to release a block allocated from
// the heap.
void mz_free(void *p);
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
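// Both checksums can be computed incrementally; a sketch:
//   mz_ulong crc = MZ_CRC32_INIT;
//   crc = mz_crc32(crc, chunk1, chunk1_len);  // feed data in pieces
//   crc = mz_crc32(crc, chunk2, chunk2_len);
// (mz_adler32() works the same way, starting from MZ_ADLER32_INIT.)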
// Compression strategies.
enum {
MZ_DEFAULT_STRATEGY = 0,
MZ_FILTERED = 1,
MZ_HUFFMAN_ONLY = 2,
MZ_RLE = 3,
MZ_FIXED = 4
};
// Method
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
size_t size);
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
MZ_NO_FLUSH = 0,
MZ_PARTIAL_FLUSH = 1,
MZ_SYNC_FLUSH = 2,
MZ_FULL_FLUSH = 3,
MZ_FINISH = 4,
MZ_BLOCK = 5
};
// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
MZ_OK = 0,
MZ_STREAM_END = 1,
MZ_NEED_DICT = 2,
MZ_ERRNO = -1,
MZ_STREAM_ERROR = -2,
MZ_DATA_ERROR = -3,
MZ_MEM_ERROR = -4,
MZ_BUF_ERROR = -5,
MZ_VERSION_ERROR = -6,
MZ_PARAM_ERROR = -10000
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
MZ_NO_COMPRESSION = 0,
MZ_BEST_SPEED = 1,
MZ_BEST_COMPRESSION = 9,
MZ_UBER_COMPRESSION = 10,
MZ_DEFAULT_LEVEL = 6,
MZ_DEFAULT_COMPRESSION = -1
};
// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct.
typedef struct mz_stream_s {
const unsigned char *next_in; // pointer to next byte to read
unsigned int avail_in; // number of bytes available at next_in
mz_ulong total_in; // total number of bytes consumed so far
unsigned char *next_out; // pointer to next byte to write
unsigned int avail_out; // number of bytes that can be written to next_out
mz_ulong total_out; // total number of bytes produced so far
char *msg; // error msg (unused)
struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
mz_alloc_func
zalloc; // optional heap allocation function (defaults to malloc)
mz_free_func zfree; // optional heap free function (defaults to free)
void *opaque; // heap alloc function user pointer
int data_type; // data_type (unused)
mz_ulong adler; // adler32 of the source or uncompressed data
mz_ulong reserved; // not used
} mz_stream;
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
// pStream must point to an initialized mz_stream struct.
// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
// level 1 enables a specially optimized compression function that's been
// optimized purely for performance, not ratio.
// (This special func. is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflateInit(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
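// A streaming sketch using a small fixed output buffer (assumes the whole
// input is already in `src`/`src_len` and `out_buf` is a local array):
//   mz_stream stream;
//   memset(&stream, 0, sizeof(stream));
//   mz_deflateInit(&stream, MZ_DEFAULT_LEVEL);
//   stream.next_in = src;
//   stream.avail_in = (unsigned int)src_len;
//   int status;
//   do {
//     stream.next_out = out_buf;
//     stream.avail_out = sizeof(out_buf);
//     status = mz_deflate(&stream, MZ_FINISH);
//     // consume out_buf[0 .. sizeof(out_buf) - stream.avail_out)
//   } while (status == MZ_OK);
//   // status == MZ_STREAM_END on success
//   mz_deflateEnd(&stream);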
// mz_deflateEnd() deinitializes a compressor:
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
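// A one-shot sketch: size the destination with mz_compressBound(), then
// compress in a single call (`src`/`src_len` are assumed inputs).
//   mz_ulong cmp_len = mz_compressBound((mz_ulong)src_len);
//   unsigned char *cmp = (unsigned char *)malloc((size_t)cmp_len);
//   int status = mz_compress(cmp, &cmp_len, src, (mz_ulong)src_len);
//   // on MZ_OK, cmp_len has been updated to the actual compressed size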
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available besides
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
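// A one-shot sketch (the decompressed size must be known, or bounded, in
// advance -- e.g. stored alongside the compressed data):
//   mz_ulong uncmp_len = (mz_ulong)original_len;
//   unsigned char *uncmp = (unsigned char *)malloc((size_t)uncmp_len);
//   int status = mz_uncompress(uncmp, &uncmp_len, cmp, cmp_len);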
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
enum {
MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};
typedef struct {
mz_uint32 m_file_index;
mz_uint32 m_central_dir_ofs;
mz_uint16 m_version_made_by;
mz_uint16 m_version_needed;
mz_uint16 m_bit_flag;
mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
time_t m_time;
#endif
mz_uint32 m_crc32;
mz_uint64 m_comp_size;
mz_uint64 m_uncomp_size;
mz_uint16 m_internal_attr;
mz_uint32 m_external_attr;
mz_uint64 m_local_header_ofs;
mz_uint32 m_comment_size;
char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n);
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
typedef enum {
MZ_ZIP_MODE_INVALID = 0,
MZ_ZIP_MODE_READING = 1,
MZ_ZIP_MODE_WRITING = 2,
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;
typedef struct mz_zip_archive_tag {
mz_uint64 m_archive_size;
mz_uint64 m_central_directory_file_ofs;
mz_uint m_total_files;
mz_zip_mode m_zip_mode;
mz_uint m_file_offset_alignment;
mz_alloc_func m_pAlloc;
mz_free_func m_pFree;
mz_realloc_func m_pRealloc;
void *m_pAlloc_opaque;
mz_file_read_func m_pRead;
mz_file_write_func m_pWrite;
void *m_pIO_opaque;
mz_zip_internal_state *m_pState;
} mz_zip_archive;
typedef enum {
MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size);
// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags);
// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags);
// Extracts an archive file using a callback function to output the file's
// data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash and an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags);
// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
TINFL_FLAG_HAS_MORE_INPUT = 2,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
TINFL_FLAG_COMPUTE_ADLER32 = 8
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
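// A sketch for a zlib-wrapped input block (`zlib_data`/`zlib_size` assumed):
//   size_t out_len = 0;
//   void *p = tinfl_decompress_mem_to_heap(zlib_data, zlib_size, &out_len,
//                                          TINFL_FLAG_PARSE_ZLIB_HEADER);
//   if (p) {
//     // ... use out_len decompressed bytes at p ...
//     mz_free(p);
//   }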
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status.
typedef enum {
TINFL_STATUS_BAD_PARAM = -3,
TINFL_STATUS_ADLER32_MISMATCH = -2,
TINFL_STATUS_FAILED = -1,
TINFL_STATUS_DONE = 0,
TINFL_STATUS_NEEDS_MORE_INPUT = 1,
TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;
// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
do { \
(r)->m_state = 0; \
} \
MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build any
// desired higher level decompression API. In the limit case, it can be called
// once per every byte input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags);
// Internal/private bits follow.
enum {
TINFL_MAX_HUFF_TABLES = 3,
TINFL_MAX_HUFF_SYMBOLS_0 = 288,
TINFL_MAX_HUFF_SYMBOLS_1 = 32,
TINFL_MAX_HUFF_SYMBOLS_2 = 19,
TINFL_FAST_LOOKUP_BITS = 10,
TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
typedef struct {
mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
struct tinfl_decompressor_tag {
mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
m_check_adler32, m_dist, m_counter, m_num_extra,
m_table_sizes[TINFL_MAX_HUFF_TABLES];
tinfl_bit_buf_t m_bit_buf;
size_t m_dist_from_out_buf_start;
tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
mz_uint8 m_raw_header[4],
m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
TDEFL_HUFFMAN_ONLY = 0,
TDEFL_DEFAULT_MAX_PROBES = 128,
TDEFL_MAX_PROBES_MASK = 0xFFF
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
TDEFL_WRITE_ZLIB_HEADER = 0x01000,
TDEFL_COMPUTE_ADLER32 = 0x02000,
TDEFL_GREEDY_PARSING_FLAG = 0x04000,
TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
TDEFL_RLE_MATCHES = 0x10000,
TDEFL_FILTER_MATCHES = 0x20000,
TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. Higher probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
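// A sketch producing a zlib-wrapped block with the default probe count
// (`src`/`src_len` assumed; freed with free() per the note above):
//   size_t cmp_len = 0;
//   void *cmp = tdefl_compress_mem_to_heap(
//       src, src_len, &cmp_len,
//       TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
//   if (cmp) {
//     // ... use cmp_len compressed bytes at cmp ...
//     free(cmp);
//   }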
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc., or MZ_DEFAULT_LEVEL as a decent default.
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out);
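// A sketch writing a 32-bit RGBA image (`pixels`, w x h, assumed) out as a
// PNG blob:
//   size_t png_len = 0;
//   void *png = tdefl_write_image_to_png_file_in_memory(pixels, w, h,
//                                                       4 /* RGBA */,
//                                                       &png_len);
//   if (png) {
//     // ... write png_len bytes to disk ...
//     mz_free(png);
//   }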
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called with TDEFL_OUT_BUF_SIZE bytes at
// a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
enum {
TDEFL_MAX_HUFF_TABLES = 3,
TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
TDEFL_LZ_DICT_SIZE = 32768,
TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
TDEFL_MIN_MATCH_LEN = 3,
TDEFL_MAX_MATCH_LEN = 258
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 12,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 15,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
TDEFL_STATUS_BAD_PARAM = -2,
TDEFL_STATUS_PUT_BUF_FAILED = -1,
TDEFL_STATUS_OKAY = 0,
TDEFL_STATUS_DONE = 1
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
TDEFL_NO_FLUSH = 0,
TDEFL_SYNC_FLUSH = 2,
TDEFL_FULL_FLUSH = 3,
TDEFL_FINISH = 4
} tdefl_flush;
// tdefl's compression state structure.
typedef struct {
tdefl_put_buf_func_ptr m_pPut_buf_func;
void *m_pPut_buf_user;
mz_uint m_flags, m_max_probes[2];
int m_greedy_parsing;
mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
m_bit_buffer;
mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
m_wants_to_finish;
tdefl_status m_prev_return_status;
const void *m_pIn_buf;
void *m_pOut_buf;
size_t *m_pIn_buf_size, *m_pOut_buf_size;
tdefl_flush m_flush;
const mz_uint8 *m_pSrc;
size_t m_src_buf_left, m_out_buf_ofs;
mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush);
// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush);
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
//#include <assert.h>
//#include <string.h>
#define MZ_ASSERT(x) assert(x)
#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
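  // 5552 is the largest n for which 255*n*(n+1)/2 + (n+1)*65520 < 2^32, so up
  // to 5552 bytes can be accumulated into s1/s2 before the modulo is needed.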
size_t block_len = buf_len % 5552;
if (!ptr) return MZ_ADLER32_INIT;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
return (s2 << 16) + s1;
}
// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
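// This variant consumes 4 bits per step: the 16-entry table below holds the
// CRC of each possible nibble value under the reflected polynomial 0xEDB88320,
// trading some speed for a table that fits in a single cache line.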
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
static const mz_uint32 s_crc32[16] = {
0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
mz_uint32 crcu32 = (mz_uint32)crc;
if (!ptr) return MZ_CRC32_INIT;
crcu32 = ~crcu32;
while (buf_len--) {
mz_uint8 b = *ptr++;
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
}
return ~crcu32;
}
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
(void)opaque, (void)items, (void)size;
return MZ_MALLOC(items * size);
}
static void def_free_func(void *opaque, void *address) {
(void)opaque, (void)address;
MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
// (void)opaque, (void)address, (void)items, (void)size;
// return MZ_REALLOC(address, items * size);
//}
const char *mz_version(void) { return MZ_VERSION; }
int mz_deflateInit(mz_streamp pStream, int level) {
return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
MZ_DEFAULT_STRATEGY);
}
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy) {
tdefl_compressor *pComp;
mz_uint comp_flags =
TDEFL_COMPUTE_ADLER32 |
tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
if (!pStream) return MZ_STREAM_ERROR;
if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS)))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = MZ_ADLER32_INIT;
pStream->msg = NULL;
pStream->reserved = 0;
pStream->total_in = 0;
pStream->total_out = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
sizeof(tdefl_compressor));
if (!pComp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pComp;
if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
mz_deflateEnd(pStream);
return MZ_PARAM_ERROR;
}
return MZ_OK;
}
int mz_deflateReset(mz_streamp pStream) {
if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
(!pStream->zfree))
return MZ_STREAM_ERROR;
pStream->total_in = pStream->total_out = 0;
tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
((tdefl_compressor *)pStream->state)->m_flags);
return MZ_OK;
}
int mz_deflate(mz_streamp pStream, int flush) {
size_t in_bytes, out_bytes;
mz_ulong orig_total_in, orig_total_out;
int mz_status = MZ_OK;
if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
(!pStream->next_out))
return MZ_STREAM_ERROR;
if (!pStream->avail_out) return MZ_BUF_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
TDEFL_STATUS_DONE)
return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
orig_total_in = pStream->total_in;
orig_total_out = pStream->total_out;
for (;;) {
tdefl_status defl_status;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
pStream->next_in, &in_bytes, pStream->next_out,
&out_bytes, (tdefl_flush)flush);
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (defl_status < 0) {
mz_status = MZ_STREAM_ERROR;
break;
} else if (defl_status == TDEFL_STATUS_DONE) {
mz_status = MZ_STREAM_END;
break;
} else if (!pStream->avail_out)
break;
else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
if ((flush) || (pStream->total_in != orig_total_in) ||
(pStream->total_out != orig_total_out))
break;
return MZ_BUF_ERROR; // Can't make forward progress without some input.
}
}
return mz_status;
}
int mz_deflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
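// A chunked-output sketch (not compiled): the classic zlib-style loop, calling
// mz_deflate() with MZ_FINISH and draining a fixed-size output chunk each
// iteration. The emit() callback and parameter names are hypothetical.
#if 0
static int example_deflate_chunks(mz_streamp pStream /* already initialized */,
                                  const unsigned char *pIn, size_t in_len,
                                  unsigned char *pChunk, size_t chunk_len,
                                  int (*emit)(const void *pBuf, size_t len)) {
  int status;
  pStream->next_in = pIn;
  pStream->avail_in = (mz_uint32)in_len;
  do {
    pStream->next_out = pChunk;
    pStream->avail_out = (mz_uint32)chunk_len;
    status = mz_deflate(pStream, MZ_FINISH);  // all input is present up front
    if ((status != MZ_OK) && (status != MZ_STREAM_END)) return status;
    if (!emit(pChunk, chunk_len - pStream->avail_out)) return MZ_ERRNO;
  } while (status != MZ_STREAM_END);
  return MZ_OK;
}
#endif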
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
(void)pStream;
// This is really overly conservative. (And lame, but it's actually pretty
// tricky to compute a true upper bound given the way tdefl's blocking works.)
return MZ_MAX(128 + (source_len * 110) / 100,
128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}
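// For reference: a 1 MiB (1,048,576-byte) source gives
// MZ_MAX(128 + 1,153,433, 128 + 1,048,576 + 34 * 5) = 1,153,561 bytes.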
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level) {
int status;
mz_stream stream;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_deflateInit(&stream, level);
if (status != MZ_OK) return status;
status = mz_deflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_deflateEnd(&stream);
return (status == MZ_OK) ? MZ_BUF_ERROR : status;
}
*pDest_len = stream.total_out;
return mz_deflateEnd(&stream);
}
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
return mz_compress2(pDest, pDest_len, pSource, source_len,
MZ_DEFAULT_COMPRESSION);
}
mz_ulong mz_compressBound(mz_ulong source_len) {
return mz_deflateBound(NULL, source_len);
}
typedef struct {
tinfl_decompressor m_decomp;
mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
int m_window_bits;
mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
tinfl_status m_last_status;
} inflate_state;
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
inflate_state *pDecomp;
if (!pStream) return MZ_STREAM_ERROR;
if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = 0;
pStream->msg = NULL;
pStream->total_in = 0;
pStream->total_out = 0;
pStream->reserved = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
sizeof(inflate_state));
if (!pDecomp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pDecomp;
tinfl_init(&pDecomp->m_decomp);
pDecomp->m_dict_ofs = 0;
pDecomp->m_dict_avail = 0;
pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
pDecomp->m_first_call = 1;
pDecomp->m_has_flushed = 0;
pDecomp->m_window_bits = window_bits;
return MZ_OK;
}
int mz_inflateInit(mz_streamp pStream) {
return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
int mz_inflate(mz_streamp pStream, int flush) {
inflate_state *pState;
mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
size_t in_bytes, out_bytes, orig_avail_in;
tinfl_status status;
if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
return MZ_STREAM_ERROR;
pState = (inflate_state *)pStream->state;
if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
orig_avail_in = pStream->avail_in;
first_call = pState->m_first_call;
pState->m_first_call = 0;
if (pState->m_last_status < 0) return MZ_DATA_ERROR;
if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
pState->m_has_flushed |= (flush == MZ_FINISH);
if ((flush == MZ_FINISH) && (first_call)) {
// MZ_FINISH on the first call implies that the input and output buffers are
// large enough to hold the entire compressed/decompressed file.
decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
pStream->next_out, pStream->next_out, &out_bytes,
decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (status < 0)
return MZ_DATA_ERROR;
else if (status != TINFL_STATUS_DONE) {
pState->m_last_status = TINFL_STATUS_FAILED;
return MZ_BUF_ERROR;
}
return MZ_STREAM_END;
}
// If flush != MZ_FINISH then we must assume there's more input.
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
if (pState->m_dict_avail) {
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
return ((pState->m_last_status == TINFL_STATUS_DONE) &&
(!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
for (;;) {
in_bytes = pStream->avail_in;
out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
status = tinfl_decompress(
&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pState->m_dict_avail = (mz_uint)out_bytes;
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
if (status < 0)
return MZ_DATA_ERROR; // Stream is corrupted (there could be some
// uncompressed data left in the output dictionary -
// oh well).
else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
// without supplying more input or by setting flush
// to MZ_FINISH.
else if (flush == MZ_FINISH) {
// The output buffer MUST be large enough to hold the remaining uncompressed
// data when flush==MZ_FINISH.
if (status == TINFL_STATUS_DONE)
return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
// status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
// at least 1 more byte on the way. If there's no more room left in the
// output buffer then something is wrong.
else if (!pStream->avail_out)
return MZ_BUF_ERROR;
} else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
(!pStream->avail_out) || (pState->m_dict_avail))
break;
}
return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
int mz_inflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
mz_stream stream;
int status;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_inflateInit(&stream);
if (status != MZ_OK) return status;
status = mz_inflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_inflateEnd(&stream);
return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
: status;
}
*pDest_len = stream.total_out;
return mz_inflateEnd(&stream);
}
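// A round-trip sketch (not compiled): mz_compressBound() sizes the destination
// buffer, then the one-shot helpers compress and decompress. Assumes malloc/
// free/memcmp are available; names are illustrative.
#if 0
static int example_mz_round_trip(const unsigned char *pSrc, mz_ulong src_len) {
  mz_ulong comp_len = mz_compressBound(src_len), decomp_len = src_len;
  unsigned char *pComp = (unsigned char *)malloc(comp_len);
  unsigned char *pDecomp = (unsigned char *)malloc(src_len);
  int status = MZ_MEM_ERROR;
  if (pComp && pDecomp) {
    status = mz_compress(pComp, &comp_len, pSrc, src_len);
    if (status == MZ_OK)
      status = mz_uncompress(pDecomp, &decomp_len, pComp, comp_len);
    if ((status == MZ_OK) &&
        ((decomp_len != src_len) || memcmp(pDecomp, pSrc, src_len)))
      status = MZ_DATA_ERROR;  // round trip must reproduce the input exactly
  }
  free(pComp);
  free(pDecomp);
  return status;  // MZ_OK on success
}
#endif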
const char *mz_error(int err) {
static struct {
int m_err;
const char *m_pDesc;
} s_error_descs[] = {{MZ_OK, ""},
{MZ_STREAM_END, "stream end"},
{MZ_NEED_DICT, "need dictionary"},
{MZ_ERRNO, "file error"},
{MZ_STREAM_ERROR, "stream error"},
{MZ_DATA_ERROR, "data error"},
{MZ_MEM_ERROR, "out of memory"},
{MZ_BUF_ERROR, "buf error"},
{MZ_VERSION_ERROR, "version error"},
{MZ_PARAM_ERROR, "parameter error"}};
mz_uint i;
for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from all
// compression API's)
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
#define TINFL_CR_BEGIN \
switch (r->m_state) { \
case 0:
#define TINFL_CR_RETURN(state_index, result) \
do { \
status = result; \
r->m_state = state_index; \
goto common_exit; \
case state_index:; \
} \
MZ_MACRO_END
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
do { \
for (;;) { \
TINFL_CR_RETURN(state_index, result); \
} \
} \
MZ_MACRO_END
#define TINFL_CR_FINISH }
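// The TINFL_CR_* macros above implement a switch-based coroutine (the same
// trick as Duff's device / protothreads): r->m_state records where the decoder
// last returned, and re-entering tinfl_decompress() jumps straight back to
// that point. A standalone sketch of the pattern (hypothetical names):
#if 0
typedef struct { int m_state; } example_coro;
static int example_coro_next(example_coro *c) {
  switch (c->m_state) {
    case 0:
      c->m_state = 1; return 10;  // first "yield"
    case 1:
      c->m_state = 2; return 20;  // second "yield"
    default:
      return -1;  // finished
  }
}
#endif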
// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never reads ahead more than it needs to. Currently
// TINFL_GET_BYTE() pads the end of the stream with 0's in this scenario.
#define TINFL_GET_BYTE(state_index, c) \
do { \
if (pIn_buf_cur >= pIn_buf_end) { \
for (;;) { \
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
if (pIn_buf_cur < pIn_buf_end) { \
c = *pIn_buf_cur++; \
break; \
} \
} else { \
c = 0; \
break; \
} \
} \
} else \
c = *pIn_buf_cur++; \
} \
MZ_MACRO_END
#define TINFL_NEED_BITS(state_index, n) \
do { \
mz_uint c; \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
#define TINFL_GET_BITS(state_index, b, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
b = bit_buf & ((1 << (n)) - 1); \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream that are needed to decode
// the next Huffman code (and absolutely no more). It works by trying to fully
// decode a Huffman code using whatever bits are currently present in the bit
// buffer. If this fails, it reads another byte, and tries again until it
// succeeds or until the bit buffer contains >=15 bits (deflate's max. Huffman
// code size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
do { \
temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
if (temp >= 0) { \
code_len = temp >> 9; \
if ((code_len) && (num_bits >= code_len)) break; \
} else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while ((temp < 0) && (num_bits >= (code_len + 1))); \
if (temp >= 0) break; \
} \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the
// decompressor to never read beyond the final byte of the deflate stream. (In
// other words, when this macro wants to read another byte from the input, it
// REALLY needs another byte in order to fully decode the next Huffman code.)
// Handling this properly is particularly important on raw deflate (non-zlib)
// streams, which aren't followed by a byte-aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
do { \
int temp; \
mz_uint code_len, c; \
if (num_bits < 15) { \
if ((pIn_buf_end - pIn_buf_cur) < 2) { \
TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
} else { \
bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
(((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
pIn_buf_cur += 2; \
num_bits += 16; \
} \
} \
if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
0) \
code_len = temp >> 9, temp &= 511; \
else { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while (temp < 0); \
} \
sym = temp; \
bit_buf >>= code_len; \
num_bits -= code_len; \
} \
MZ_MACRO_END
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags) {
static const int s_length_base[31] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
4, 4, 5, 5, 5, 5, 0, 0, 0};
static const int s_dist_base[32] = {
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0};
static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
static const mz_uint8 s_length_dezigzag[19] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static const int s_min_table_sizes[3] = {257, 1, 4};
tinfl_status status = TINFL_STATUS_FAILED;
mz_uint32 num_bits, dist, counter, num_extra;
tinfl_bit_buf_t bit_buf;
const mz_uint8 *pIn_buf_cur = pIn_buf_next,
*const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
mz_uint8 *pOut_buf_cur = pOut_buf_next,
*const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
size_t out_buf_size_mask =
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
? (size_t)-1
: ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
dist_from_out_buf_start;
// Ensure the output buffer's size is a power of 2, unless the output buffer
// is large enough to hold the entire output file (in which case it doesn't
// matter).
if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
(pOut_buf_next < pOut_buf_start)) {
*pIn_buf_size = *pOut_buf_size = 0;
return TINFL_STATUS_BAD_PARAM;
}
num_bits = r->m_num_bits;
bit_buf = r->m_bit_buf;
dist = r->m_dist;
counter = r->m_counter;
num_extra = r->m_num_extra;
dist_from_out_buf_start = r->m_dist_from_out_buf_start;
TINFL_CR_BEGIN
bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
r->m_z_adler32 = r->m_check_adler32 = 1;
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_GET_BYTE(1, r->m_zhdr0);
TINFL_GET_BYTE(2, r->m_zhdr1);
counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
(r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
((out_buf_size_mask + 1) <
(size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
if (counter) {
TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
}
}
do {
TINFL_GET_BITS(3, r->m_final, 3);
r->m_type = r->m_final >> 1;
if (r->m_type == 0) {
TINFL_SKIP_BITS(5, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
if (num_bits)
TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
else
TINFL_GET_BYTE(7, r->m_raw_header[counter]);
}
if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
(mz_uint)(0xFFFF ^
(r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
}
while ((counter) && (num_bits)) {
TINFL_GET_BITS(51, dist, 8);
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)dist;
counter--;
}
while (counter) {
size_t n;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
}
while (pIn_buf_cur >= pIn_buf_end) {
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
} else {
TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
}
}
n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
(size_t)(pIn_buf_end - pIn_buf_cur)),
counter);
TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
pIn_buf_cur += n;
pOut_buf_cur += n;
counter -= (mz_uint)n;
}
} else if (r->m_type == 3) {
TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
} else {
if (r->m_type == 1) {
mz_uint8 *p = r->m_tables[0].m_code_size;
mz_uint i;
r->m_table_sizes[0] = 288;
r->m_table_sizes[1] = 32;
TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
} else {
for (counter = 0; counter < 3; counter++) {
TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
r->m_table_sizes[counter] += s_min_table_sizes[counter];
}
MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
mz_uint s;
TINFL_GET_BITS(14, s, 3);
r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
}
r->m_table_sizes[2] = 19;
}
for (; (int)r->m_type >= 0; r->m_type--) {
int tree_next, tree_cur;
tinfl_huff_table *pTable;
mz_uint i, j, used_syms, total, sym_index, next_code[17],
total_syms[16];
pTable = &r->m_tables[r->m_type];
MZ_CLEAR_OBJ(total_syms);
MZ_CLEAR_OBJ(pTable->m_look_up);
MZ_CLEAR_OBJ(pTable->m_tree);
for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
total_syms[pTable->m_code_size[i]]++;
used_syms = 0, total = 0;
next_code[0] = next_code[1] = 0;
for (i = 1; i <= 15; ++i) {
used_syms += total_syms[i];
next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
}
if ((65536 != total) && (used_syms > 1)) {
TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
}
for (tree_next = -1, sym_index = 0;
sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
mz_uint rev_code = 0, l, cur_code,
code_size = pTable->m_code_size[sym_index];
if (!code_size) continue;
cur_code = next_code[code_size]++;
for (l = code_size; l > 0; l--, cur_code >>= 1)
rev_code = (rev_code << 1) | (cur_code & 1);
if (code_size <= TINFL_FAST_LOOKUP_BITS) {
mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
pTable->m_look_up[rev_code] = k;
rev_code += (1 << code_size);
}
continue;
}
if (0 ==
(tree_cur = pTable->m_look_up[rev_code &
(TINFL_FAST_LOOKUP_SIZE - 1)])) {
pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
(mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
}
rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
tree_cur -= ((rev_code >>= 1) & 1);
if (!pTable->m_tree[-tree_cur - 1]) {
pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
} else
tree_cur = pTable->m_tree[-tree_cur - 1];
}
tree_cur -= ((rev_code >>= 1) & 1);
pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
}
if (r->m_type == 2) {
for (counter = 0;
counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
mz_uint s;
TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
if (dist < 16) {
r->m_len_codes[counter++] = (mz_uint8)dist;
continue;
}
if ((dist == 16) && (!counter)) {
TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
}
num_extra = "\02\03\07"[dist - 16];
TINFL_GET_BITS(18, s, num_extra);
s += "\03\03\013"[dist - 16];
TINFL_MEMSET(r->m_len_codes + counter,
(dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
counter += s;
}
if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
}
TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
r->m_table_sizes[0]);
TINFL_MEMCPY(r->m_tables[1].m_code_size,
r->m_len_codes + r->m_table_sizes[0],
r->m_table_sizes[1]);
}
}
for (;;) {
mz_uint8 *pSrc;
for (;;) {
if (((pIn_buf_end - pIn_buf_cur) < 4) ||
((pOut_buf_end - pOut_buf_cur) < 2)) {
TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
if (counter >= 256) break;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)counter;
} else {
int sym2;
mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
if (num_bits < 30) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 4;
num_bits += 32;
}
#else
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
counter = sym2;
bit_buf >>= code_len;
num_bits -= code_len;
if (counter & 256) break;
#if !TINFL_USE_64BIT_BITBUF
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
bit_buf >>= code_len;
num_bits -= code_len;
pOut_buf_cur[0] = (mz_uint8)counter;
if (sym2 & 256) {
pOut_buf_cur++;
counter = sym2;
break;
}
pOut_buf_cur[1] = (mz_uint8)sym2;
pOut_buf_cur += 2;
}
}
if ((counter &= 511) == 256) break;
num_extra = s_length_extra[counter - 257];
counter = s_length_base[counter - 257];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(25, extra_bits, num_extra);
counter += extra_bits;
}
TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
num_extra = s_dist_extra[dist];
dist = s_dist_base[dist];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(27, extra_bits, num_extra);
dist += extra_bits;
}
dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
if ((dist > dist_from_out_buf_start) &&
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
}
pSrc = pOut_buf_start +
((dist_from_out_buf_start - dist) & out_buf_size_mask);
if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
while (counter--) {
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ =
pOut_buf_start[(dist_from_out_buf_start++ - dist) &
out_buf_size_mask];
}
continue;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
else if ((counter >= 9) && (counter <= dist)) {
const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
do {
((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
pOut_buf_cur += 8;
} while ((pSrc += 8) < pSrc_end);
if ((counter &= 7) < 3) {
if (counter) {
pOut_buf_cur[0] = pSrc[0];
if (counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
continue;
}
}
#endif
do {
pOut_buf_cur[0] = pSrc[0];
pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur[2] = pSrc[2];
pOut_buf_cur += 3;
pSrc += 3;
} while ((int)(counter -= 3) > 2);
if ((int)counter > 0) {
pOut_buf_cur[0] = pSrc[0];
if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
}
}
} while (!(r->m_final & 1));
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_SKIP_BITS(32, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
mz_uint s;
if (num_bits)
TINFL_GET_BITS(41, s, 8);
else
TINFL_GET_BYTE(42, s);
r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
}
}
TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
TINFL_CR_FINISH
common_exit:
r->m_num_bits = num_bits;
r->m_bit_buf = bit_buf;
r->m_dist = dist;
r->m_counter = counter;
r->m_num_extra = num_extra;
r->m_dist_from_out_buf_start = dist_from_out_buf_start;
*pIn_buf_size = pIn_buf_cur - pIn_buf_next;
*pOut_buf_size = pOut_buf_cur - pOut_buf_next;
if ((decomp_flags &
(TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
(status >= 0)) {
const mz_uint8 *ptr = pOut_buf_next;
size_t buf_len = *pOut_buf_size;
mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
s2 = r->m_check_adler32 >> 16;
size_t block_len = buf_len % 5552;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
r->m_check_adler32 = (s2 << 16) + s1;
if ((status == TINFL_STATUS_DONE) &&
(decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
(r->m_check_adler32 != r->m_z_adler32))
status = TINFL_STATUS_ADLER32_MISMATCH;
}
return status;
}
// Higher level helper functions.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tinfl_decompressor decomp;
void *pBuf = NULL, *pNew_buf;
size_t src_buf_ofs = 0, out_buf_capacity = 0;
*pOut_len = 0;
tinfl_init(&decomp);
for (;;) {
size_t src_buf_size = src_buf_len - src_buf_ofs,
dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
tinfl_status status = tinfl_decompress(
&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
(mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
&dst_buf_size,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
src_buf_ofs += src_buf_size;
*pOut_len += dst_buf_size;
if (status == TINFL_STATUS_DONE) break;
new_out_buf_capacity = out_buf_capacity * 2;
if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
if (!pNew_buf) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
pBuf = pNew_buf;
out_buf_capacity = new_out_buf_capacity;
}
return pBuf;
}
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tinfl_decompressor decomp;
tinfl_status status;
tinfl_init(&decomp);
status =
tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
(mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
: out_buf_len;
}
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
int result = 0;
tinfl_decompressor decomp;
mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
size_t in_buf_ofs = 0, dict_ofs = 0;
if (!pDict) return TINFL_STATUS_FAILED;
tinfl_init(&decomp);
for (;;) {
size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
tinfl_status status =
tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
&in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
(flags &
~(TINFL_FLAG_HAS_MORE_INPUT |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
in_buf_ofs += in_buf_size;
if ((dst_buf_size) &&
(!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
break;
if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
result = (status == TINFL_STATUS_DONE);
break;
}
dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
}
MZ_FREE(pDict);
*pIn_buf_size = in_buf_ofs;
return result;
}
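// A callback-driven sketch (not compiled): streaming a whole in-memory deflate
// stream out to a FILE* one dictionary-sized chunk at a time. Assumes
// <stdio.h>; helper names are hypothetical.
#if 0
static int example_put_buf(const void *pBuf, int len, void *pUser) {
  return fwrite(pBuf, 1, (size_t)len, (FILE *)pUser) == (size_t)len;
}
static int example_inflate_to_file(const void *pComp, size_t comp_len,
                                   FILE *pOut) {
  // Returns 1 on success, 0 on failure (see result handling above).
  return tinfl_decompress_mem_to_callback(pComp, &comp_len, example_put_buf,
                                          pOut, TINFL_FLAG_PARSE_ZLIB_HEADER);
}
#endif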
// ------------------- Low-level Compression (independent from all decompression
// API's)
// Purposely making these tables static for faster init and thread safety.
static const mz_uint16 s_tdefl_len_sym[256] = {
257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
285};
static const mz_uint8 s_tdefl_len_extra[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8,
8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
// Radix sorts the tdefl_sym_freq[] array by the 16-bit key m_key. Returns a
// pointer to the sorted values.
typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq;
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
tdefl_sym_freq *pSyms0,
tdefl_sym_freq *pSyms1) {
mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
MZ_CLEAR_OBJ(hist);
for (i = 0; i < num_syms; i++) {
mz_uint freq = pSyms0[i].m_key;
hist[freq & 0xFF]++;
hist[256 + ((freq >> 8) & 0xFF)]++;
}
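  // If every key's high byte is zero, hist[256] == num_syms and the second
  // (high-byte) radix pass can be skipped entirely.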
while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
total_passes--;
for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) {
const mz_uint32 *pHist = &hist[pass << 8];
mz_uint offsets[256], cur_ofs = 0;
for (i = 0; i < 256; i++) {
offsets[i] = cur_ofs;
cur_ofs += pHist[i];
}
for (i = 0; i < num_syms; i++)
pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] =
pCur_syms[i];
{
tdefl_sym_freq *t = pCur_syms;
pCur_syms = pNew_syms;
pNew_syms = t;
}
}
return pCur_syms;
}
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
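// Given A[0..n-1] sorted by increasing m_key (symbol frequency), this replaces
// each m_key with the depth (code length) of that symbol in a
// minimum-redundancy (Huffman) code, using only the array itself as scratch.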
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
int root, leaf, next, avbl, used, dpth;
if (n == 0)
return;
else if (n == 1) {
A[0].m_key = 1;
return;
}
A[0].m_key += A[1].m_key;
root = 0;
leaf = 2;
for (next = 1; next < n - 1; next++) {
if (leaf >= n || A[root].m_key < A[leaf].m_key) {
A[next].m_key = A[root].m_key;
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = A[leaf++].m_key;
if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
}
A[n - 2].m_key = 0;
for (next = n - 3; next >= 0; next--)
A[next].m_key = A[A[next].m_key].m_key + 1;
avbl = 1;
used = dpth = 0;
root = n - 2;
next = n - 1;
while (avbl > 0) {
while (root >= 0 && (int)A[root].m_key == dpth) {
used++;
root--;
}
while (avbl > used) {
A[next--].m_key = (mz_uint16)(dpth);
avbl--;
}
avbl = 2 * used;
dpth++;
used = 0;
}
}
// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
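// The adjustment below must preserve the Kraft equality
// sum(num_codes[i] << (max_code_size - i)) == 1 << max_code_size: codes longer
// than the limit are first clamped to max_code_size, then counts are shifted
// between lengths until the equality holds again.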
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
int code_list_len,
int max_code_size) {
int i;
mz_uint32 total = 0;
if (code_list_len <= 1) return;
for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
pNum_codes[max_code_size] += pNum_codes[i];
for (i = max_code_size; i > 0; i--)
total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
while (total != (1UL << max_code_size)) {
pNum_codes[max_code_size]--;
for (i = max_code_size - 1; i > 0; i--)
if (pNum_codes[i]) {
pNum_codes[i]--;
pNum_codes[i + 1] += 2;
break;
}
total--;
}
}
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
int table_len, int code_size_limit,
int static_table) {
int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
MZ_CLEAR_OBJ(num_codes);
if (static_table) {
for (i = 0; i < table_len; i++)
num_codes[d->m_huff_code_sizes[table_num][i]]++;
} else {
tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
*pSyms;
int num_used_syms = 0;
const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
for (i = 0; i < table_len; i++)
if (pSym_count[i]) {
syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
}
pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
code_size_limit);
MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
for (l = num_codes[i]; l > 0; l--)
d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
}
next_code[1] = 0;
for (j = 0, i = 2; i <= code_size_limit; i++)
next_code[i] = j = ((j + num_codes[i - 1]) << 1);
for (i = 0; i < table_len; i++) {
mz_uint rev_code = 0, code, code_size;
if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
code = next_code[code_size]++;
for (l = code_size; l > 0; l--, code >>= 1)
rev_code = (rev_code << 1) | (code & 1);
d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
}
}
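// Example: code sizes {2, 2, 3, 3, 3, 3} yield the canonical codes 00, 01,
// 100, 101, 110, 111; they are stored bit-reversed above because deflate emits
// Huffman codes LSB-first.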
#define TDEFL_PUT_BITS(b, l) \
do { \
mz_uint bits = b; \
mz_uint len = l; \
MZ_ASSERT(bits <= ((1U << len) - 1U)); \
d->m_bit_buffer |= (bits << d->m_bits_in); \
d->m_bits_in += len; \
while (d->m_bits_in >= 8) { \
if (d->m_pOutput_buf < d->m_pOutput_buf_end) \
*d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
d->m_bit_buffer >>= 8; \
d->m_bits_in -= 8; \
} \
} \
MZ_MACRO_END
#define TDEFL_RLE_PREV_CODE_SIZE() \
{ \
if (rle_repeat_count) { \
if (rle_repeat_count < 3) { \
d->m_huff_count[2][prev_code_size] = (mz_uint16)( \
d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
while (rle_repeat_count--) \
packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
} else { \
d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 16; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_repeat_count - 3); \
} \
rle_repeat_count = 0; \
} \
}
#define TDEFL_RLE_ZERO_CODE_SIZE() \
{ \
if (rle_z_count) { \
if (rle_z_count < 3) { \
d->m_huff_count[2][0] = \
(mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \
while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
} else if (rle_z_count <= 10) { \
d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 17; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 3); \
} else { \
d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 18; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 11); \
} \
rle_z_count = 0; \
} \
}
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
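// (This is deflate's code-length symbol transmission order; it matches
// s_length_dezigzag in the decompressor above.)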
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
int num_lit_codes, num_dist_codes, num_bit_lengths;
mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
rle_repeat_count, packed_code_sizes_index;
mz_uint8
code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
prev_code_size = 0xFF;
d->m_huff_count[0][256] = 1;
tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
num_dist_codes);
total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
num_packed_code_sizes = 0;
rle_z_count = 0;
rle_repeat_count = 0;
memset(&d->m_huff_count[2][0], 0,
sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
for (i = 0; i < total_code_sizes_to_pack; i++) {
mz_uint8 code_size = code_sizes_to_pack[i];
if (!code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
if (++rle_z_count == 138) {
TDEFL_RLE_ZERO_CODE_SIZE();
}
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
if (code_size != prev_code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
d->m_huff_count[2][code_size] =
(mz_uint16)(d->m_huff_count[2][code_size] + 1);
packed_code_sizes[num_packed_code_sizes++] = code_size;
} else if (++rle_repeat_count == 6) {
TDEFL_RLE_PREV_CODE_SIZE();
}
}
prev_code_size = code_size;
}
if (rle_repeat_count) {
TDEFL_RLE_PREV_CODE_SIZE();
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
}
tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
TDEFL_PUT_BITS(2, 2);
TDEFL_PUT_BITS(num_lit_codes - 257, 5);
TDEFL_PUT_BITS(num_dist_codes - 1, 5);
for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
if (d->m_huff_code_sizes
[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
break;
num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
for (i = 0; (int)i < num_bit_lengths; i++)
TDEFL_PUT_BITS(
d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
for (packed_code_sizes_index = 0;
packed_code_sizes_index < num_packed_code_sizes;) {
mz_uint code = packed_code_sizes[packed_code_sizes_index++];
MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
if (code >= 16)
TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
"\02\03\07"[code - 16]);
}
}
static void tdefl_start_static_block(tdefl_compressor *d) {
mz_uint i;
mz_uint8 *p = &d->m_huff_code_sizes[0][0];
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
memset(d->m_huff_code_sizes[1], 5, 32);
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
TDEFL_PUT_BITS(1, 2);
}
static const mz_uint mz_bitmasks[17] = {
0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
mz_uint8 *pOutput_buf = d->m_pOutput_buf;
mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
mz_uint64 bit_buffer = d->m_bit_buffer;
mz_uint bits_in = d->m_bits_in;
#define TDEFL_PUT_BITS_FAST(b, l) \
{ \
bit_buffer |= (((mz_uint64)(b)) << bits_in); \
bits_in += (l); \
}
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint s0, s1, n0, n1, sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
// This sequence coaxes MSVC into using cmov's vs. jmp's.
s0 = s_tdefl_small_dist_sym[match_dist & 511];
n0 = s_tdefl_small_dist_extra[match_dist & 511];
s1 = s_tdefl_large_dist_sym[match_dist >> 8];
n1 = s_tdefl_large_dist_extra[match_dist >> 8];
sym = (match_dist < 512) ? s0 : s1;
num_extra_bits = (match_dist < 512) ? n0 : n1;
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
}
}
}
if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
*(mz_uint64 *)pOutput_buf = bit_buffer;
pOutput_buf += (bits_in >> 3);
bit_buffer >>= (bits_in & ~7);
bits_in &= 7;
}
#undef TDEFL_PUT_BITS_FAST
d->m_pOutput_buf = pOutput_buf;
d->m_bits_in = 0;
d->m_bit_buffer = 0;
while (bits_in) {
mz_uint32 n = MZ_MIN(bits_in, 16);
TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
bit_buffer >>= n;
bits_in -= n;
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
if (match_dist < 512) {
sym = s_tdefl_small_dist_sym[match_dist];
num_extra_bits = s_tdefl_small_dist_extra[match_dist];
} else {
sym = s_tdefl_large_dist_sym[match_dist >> 8];
num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
}
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
}
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
if (static_block)
tdefl_start_static_block(d);
else
tdefl_start_dynamic_block(d);
return tdefl_compress_lz_codes(d);
}
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
mz_uint saved_bit_buf, saved_bits_in;
mz_uint8 *pSaved_output_buf;
mz_bool comp_block_succeeded = MZ_FALSE;
int n,
use_raw_block =
((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
(d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
mz_uint8 *pOutput_buf_start =
((d->m_pPut_buf_func == NULL) &&
((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
: d->m_output_buf;
d->m_pOutput_buf = pOutput_buf_start;
d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
MZ_ASSERT(!d->m_output_flush_remaining);
d->m_output_flush_ofs = 0;
d->m_output_flush_remaining = 0;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
TDEFL_PUT_BITS(0x78, 8);
TDEFL_PUT_BITS(0x01, 8);
}
TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
pSaved_output_buf = d->m_pOutput_buf;
saved_bit_buf = d->m_bit_buffer;
saved_bits_in = d->m_bits_in;
if (!use_raw_block)
comp_block_succeeded =
tdefl_compress_block(d,
(d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
(d->m_total_lz_bytes < 48));
// If the block gets expanded, forget the current contents of the output
// buffer and send a raw block instead.
if (((use_raw_block) ||
((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
d->m_total_lz_bytes))) &&
((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
mz_uint i;
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
TDEFL_PUT_BITS(0, 2);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
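    // A stored (raw) block's payload is preceded by LEN and NLEN (LEN's one's
    // complement), each 16 bits little endian; the XOR in the loop below
    // emits both.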
for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
}
for (i = 0; i < d->m_total_lz_bytes; ++i) {
TDEFL_PUT_BITS(
d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
8);
}
}
// Check for the extremely unlikely (if not impossible) case of the compressed
// block not fitting into the output buffer when using dynamic codes.
else if (!comp_block_succeeded) {
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
tdefl_compress_block(d, MZ_TRUE);
}
if (flush) {
if (flush == TDEFL_FINISH) {
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
mz_uint i, a = d->m_adler32;
for (i = 0; i < 4; i++) {
TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
a <<= 8;
}
}
} else {
mz_uint i, z = 0;
TDEFL_PUT_BITS(0, 3);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, z ^= 0xFFFF) {
TDEFL_PUT_BITS(z & 0xFFFF, 16);
}
}
}
MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
d->m_total_lz_bytes = 0;
d->m_block_index++;
if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
if (d->m_pPut_buf_func) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
} else if (pOutput_buf_start == d->m_output_buf) {
int bytes_to_copy = (int)MZ_MIN(
(size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
bytes_to_copy);
d->m_out_buf_ofs += bytes_to_copy;
if ((n -= bytes_to_copy) != 0) {
d->m_output_flush_ofs = bytes_to_copy;
d->m_output_flush_remaining = n;
}
} else {
d->m_out_buf_ofs += n;
}
}
return d->m_output_flush_remaining;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
s01 = TDEFL_READ_UNALIGNED_WORD(s);
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
q = (const mz_uint16 *)(d->m_dict + probe_pos);
if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
p = s;
probe_len = 32;
do {
} while (
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
if (!probe_len) {
*pMatch_dist = dist;
*pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
break;
} else if ((probe_len = ((mz_uint)(p - s) * 2) +
(mz_uint)(*(const mz_uint8 *)p ==
*(const mz_uint8 *)q)) > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
max_match_len)
break;
c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
}
}
}
#else
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint8 *s = d->m_dict + pos, *p, *q;
mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if ((d->m_dict[probe_pos + match_len] == c0) && \
(d->m_dict[probe_pos + match_len - 1] == c1)) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
p = s;
q = d->m_dict + probe_pos;
for (probe_len = 0; probe_len < max_match_len; probe_len++)
if (*p++ != *q++) break;
if (probe_len > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
c0 = d->m_dict[pos + match_len];
c1 = d->m_dict[pos + match_len - 1];
}
}
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
// Faster, minimally featured LZRW1-style match+parse loop with better
// register utilization. Intended for applications where raw throughput is
// valued more highly than ratio.
mz_uint lookahead_pos = d->m_lookahead_pos,
lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
total_lz_bytes = d->m_total_lz_bytes,
num_flags_left = d->m_num_flags_left;
mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
mz_uint dst_pos =
(lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
d->m_src_buf_left -= num_bytes_to_process;
lookahead_size += num_bytes_to_process;
while (num_bytes_to_process) {
mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
d->m_pSrc += n;
dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
num_bytes_to_process -= n;
}
dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
break;
while (lookahead_size >= 4) {
mz_uint cur_match_dist, cur_match_len = 1;
mz_uint8 *pCur_dict = d->m_dict + cur_pos;
mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
mz_uint hash =
(first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
TDEFL_LEVEL1_HASH_SIZE_MASK;
mz_uint probe_pos = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)lookahead_pos;
if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
dict_size) &&
((*(const mz_uint32 *)(d->m_dict +
(probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
0xFFFFFF) == first_trigram)) {
const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
mz_uint32 probe_len = 32;
do {
} while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
(mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
if (!probe_len)
cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U))) {
cur_match_len = 1;
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
} else {
mz_uint32 s0, s1;
cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 1) &&
(cur_match_dist <= TDEFL_LZ_DICT_SIZE));
cur_match_dist--;
pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
*(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
pLZ_code_buf += 3;
*pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
TDEFL_MIN_MATCH_LEN]]++;
}
} else {
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
}
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
total_lz_bytes += cur_match_len;
lookahead_pos += cur_match_len;
dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
MZ_ASSERT(lookahead_size >= cur_match_len);
lookahead_size -= cur_match_len;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
while (lookahead_size) {
mz_uint8 lit = d->m_dict[cur_pos];
total_lz_bytes++;
*pLZ_code_buf++ = lit;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
lookahead_pos++;
dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
lookahead_size--;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
}
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
mz_uint8 lit) {
d->m_total_lz_bytes++;
*d->m_pLZ_code_buf++ = lit;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
}
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
mz_uint match_len,
mz_uint match_dist) {
mz_uint32 s0, s1;
MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
(match_dist <= TDEFL_LZ_DICT_SIZE));
d->m_total_lz_bytes += match_len;
d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
match_dist -= 1;
d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
d->m_pLZ_code_buf += 3;
*d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
s0 = s_tdefl_small_dist_sym[match_dist & 511];
s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
if (match_len >= TDEFL_MIN_MATCH_LEN)
d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
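// The LZ code buffer written by the two helpers above is a sequence of flag
// bytes, each covering up to 8 following codes (LSB first): a 0 bit means a
// one-byte literal, a 1 bit means a 3-byte match record holding
// (match_len - TDEFL_MIN_MATCH_LEN) and the 16-bit (match_dist - 1), little
// endian. For example, a flag byte of 0x02 followed by 'A', {5, 0x00, 0x01}
// encodes the literal 'A' and then an 8-byte match at distance 257.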
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
const mz_uint8 *pSrc = d->m_pSrc;
size_t src_buf_left = d->m_src_buf_left;
tdefl_flush flush = d->m_flush;
while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
// Update dictionary and hash chains. Keeps the lookahead size equal to
// TDEFL_MAX_MATCH_LEN.
if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK,
ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
src_buf_left -= num_bytes_to_process;
d->m_lookahead_size += num_bytes_to_process;
while (pSrc != pSrc_end) {
mz_uint8 c = *pSrc++;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
ins_pos++;
}
} else {
while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
mz_uint8 c = *pSrc++;
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK;
src_buf_left--;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< (TDEFL_LZ_HASH_SHIFT * 2)) ^
(d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
c) &
(TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
}
}
}
d->m_dict_size =
MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
// Simple lazy/greedy parsing state machine.
len_to_move = 1;
cur_match_dist = 0;
cur_match_len =
d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
cur_match_len = 0;
while (cur_match_len < d->m_lookahead_size) {
if (d->m_dict[cur_pos + cur_match_len] != c) break;
cur_match_len++;
}
if (cur_match_len < TDEFL_MIN_MATCH_LEN)
cur_match_len = 0;
else
cur_match_dist = 1;
}
} else {
tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
d->m_lookahead_size, &cur_match_dist, &cur_match_len);
}
if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U)) ||
(cur_pos == cur_match_dist) ||
((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
cur_match_dist = cur_match_len = 0;
}
if (d->m_saved_match_len) {
if (cur_match_len > d->m_saved_match_len) {
tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
if (cur_match_len >= 128) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
d->m_saved_match_len = 0;
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[cur_pos];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
} else {
tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
len_to_move = d->m_saved_match_len - 1;
d->m_saved_match_len = 0;
}
} else if (!cur_match_dist)
tdefl_record_literal(d,
d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
(cur_match_len >= 128)) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
// Move the lookahead forward by len_to_move bytes.
d->m_lookahead_pos += len_to_move;
MZ_ASSERT(d->m_lookahead_size >= len_to_move);
d->m_lookahead_size -= len_to_move;
d->m_dict_size =
MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
// Check if it's time to flush the current LZ codes to the internal output
// buffer.
if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
((d->m_total_lz_bytes > 31 * 1024) &&
(((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
d->m_total_lz_bytes) ||
(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
int n;
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
}
}
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
return MZ_TRUE;
}
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
if (d->m_pIn_buf_size) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
}
if (d->m_pOut_buf_size) {
size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
d->m_output_flush_remaining);
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
d->m_output_buf + d->m_output_flush_ofs, n);
d->m_output_flush_ofs += (mz_uint)n;
d->m_output_flush_remaining -= (mz_uint)n;
d->m_out_buf_ofs += n;
*d->m_pOut_buf_size = d->m_out_buf_ofs;
}
return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE
: TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush) {
if (!d) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return TDEFL_STATUS_BAD_PARAM;
}
d->m_pIn_buf = pIn_buf;
d->m_pIn_buf_size = pIn_buf_size;
d->m_pOut_buf = pOut_buf;
d->m_pOut_buf_size = pOut_buf_size;
d->m_pSrc = (const mz_uint8 *)(pIn_buf);
d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
d->m_out_buf_ofs = 0;
d->m_flush = flush;
if (((d->m_pPut_buf_func != NULL) ==
((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
(d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
(d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
(pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
(pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
}
d->m_wants_to_finish |= (flush == TDEFL_FINISH);
if ((d->m_output_flush_remaining) || (d->m_finished))
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
TDEFL_RLE_MATCHES)) == 0)) {
if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
} else
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
{
if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
}
if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
(pIn_buf))
d->m_adler32 =
(mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
d->m_pSrc - (const mz_uint8 *)pIn_buf);
if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
(!d->m_output_flush_remaining)) {
if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
d->m_finished = (flush == TDEFL_FINISH);
if (flush == TDEFL_FULL_FLUSH) {
MZ_CLEAR_OBJ(d->m_hash);
MZ_CLEAR_OBJ(d->m_next);
d->m_dict_size = 0;
}
}
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
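#if 0
// Illustrative sketch (not compiled): one-shot compression via the streaming
// API above. The names src/src_len/dst/dst_len are hypothetical. Note that
// tdefl_compressor is large, so it is heap allocated here, just like the
// helper functions below do.
static mz_bool example_tdefl_one_shot(const void *src, size_t src_len,
                                      void *dst, size_t *dst_len) {
  tdefl_compressor *pComp =
      (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  size_t in_len = src_len;
  mz_bool ok = MZ_FALSE;
  if (!pComp) return MZ_FALSE;
  // Low 12 bits of the flags select the max dictionary probe count (128 here).
  if (tdefl_init(pComp, NULL, NULL, TDEFL_WRITE_ZLIB_HEADER | 128) ==
      TDEFL_STATUS_OKAY)
    ok = (tdefl_compress(pComp, src, &in_len, dst, dst_len, TDEFL_FINISH) ==
          TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return ok;
}
#endif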
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush) {
MZ_ASSERT(d->m_pPut_buf_func);
return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
d->m_pPut_buf_func = pPut_buf_func;
d->m_pPut_buf_user = pPut_buf_user;
d->m_flags = (mz_uint)(flags);
d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_pOutput_buf = d->m_output_buf;
d->m_pOutput_buf_end = d->m_output_buf;
d->m_prev_return_status = TDEFL_STATUS_OKAY;
d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
d->m_adler32 = 1;
d->m_pIn_buf = NULL;
d->m_pOut_buf = NULL;
d->m_pIn_buf_size = NULL;
d->m_pOut_buf_size = NULL;
d->m_flush = TDEFL_NO_FLUSH;
d->m_pSrc = NULL;
d->m_src_buf_left = 0;
d->m_out_buf_ofs = 0;
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
return TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
return d->m_prev_return_status;
}
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
tdefl_compressor *pComp;
mz_bool succeeded;
if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
if (!pComp) return MZ_FALSE;
succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
TDEFL_STATUS_OKAY);
succeeded =
succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
TDEFL_STATUS_DONE);
MZ_FREE(pComp);
return succeeded;
}
typedef struct {
size_t m_size, m_capacity;
mz_uint8 *m_pBuf;
mz_bool m_expandable;
} tdefl_output_buffer;
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
void *pUser) {
tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
size_t new_size = p->m_size + len;
if (new_size > p->m_capacity) {
size_t new_capacity = p->m_capacity;
mz_uint8 *pNew_buf;
if (!p->m_expandable) return MZ_FALSE;
do {
new_capacity = MZ_MAX(128U, new_capacity << 1U);
} while (new_size > new_capacity);
pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
if (!pNew_buf) return MZ_FALSE;
p->m_pBuf = pNew_buf;
p->m_capacity = new_capacity;
}
memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
p->m_size = new_size;
return MZ_TRUE;
}
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_len)
return MZ_FALSE;
else
*pOut_len = 0;
out_buf.m_expandable = MZ_TRUE;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return NULL;
*pOut_len = out_buf.m_size;
return out_buf.m_pBuf;
}
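// Usage sketch (illustrative; pSrc/src_len are hypothetical): compress a
// buffer to a growable heap block, then release it with MZ_FREE():
//   size_t out_len = 0;
//   void *pOut = tdefl_compress_mem_to_heap(pSrc, src_len, &out_len,
//                                           TDEFL_WRITE_ZLIB_HEADER);
//   if (pOut) { /* consume pOut[0..out_len) */ MZ_FREE(pOut); }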
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_buf) return 0;
out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
out_buf.m_capacity = out_buf_len;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return 0;
return out_buf.m_size;
}
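// Usage sketch (illustrative): same as above, but into a caller supplied,
// fixed size buffer. A return of 0 means failure - typically the output did
// not fit, since this output buffer is not expandable:
//   size_t n = tdefl_compress_mem_to_mem(pDst, dst_capacity, pSrc, src_len,
//                                        TDEFL_WRITE_ZLIB_HEADER);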
#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff on
// some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy) {
mz_uint comp_flags =
s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
if (!level)
comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
else if (strategy == MZ_FILTERED)
comp_flags |= TDEFL_FILTER_MATCHES;
else if (strategy == MZ_HUFFMAN_ONLY)
comp_flags &= ~TDEFL_MAX_PROBES_MASK;
else if (strategy == MZ_FIXED)
comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
else if (strategy == MZ_RLE)
comp_flags |= TDEFL_RLE_MATCHES;
return comp_flags;
}
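// Example: tdefl_create_comp_flags_from_zip_params(9, MZ_DEFAULT_WINDOW_BITS,
// MZ_DEFAULT_STRATEGY) returns 768 (the max probe count for level 9) with
// TDEFL_WRITE_ZLIB_HEADER set; levels <= 3 would additionally set
// TDEFL_GREEDY_PARSING_FLAG, and a negative window_bits (raw deflate) would
// omit the zlib header flag.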
#endif // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning( \
disable : 4267) // 'argument': conversion from '__int64' to 'int',
// possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip) {
// Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
// defined.
static const mz_uint s_tdefl_png_num_probes[11] = {
0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
tdefl_compressor *pComp =
(tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
tdefl_output_buffer out_buf;
int i, bpl = w * num_chans, y, z;
mz_uint32 c;
*pLen_out = 0;
if (!pComp) return NULL;
MZ_CLEAR_OBJ(out_buf);
out_buf.m_expandable = MZ_TRUE;
out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
MZ_FREE(pComp);
return NULL;
}
// write dummy header
for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf);
// compress image data
tdefl_init(
pComp, tdefl_output_buffer_putter, &out_buf,
s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
for (y = 0; y < h; ++y) {
tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
tdefl_compress_buffer(pComp,
(mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
bpl, TDEFL_NO_FLUSH);
}
if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
TDEFL_STATUS_DONE) {
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
// write real header
*pLen_out = out_buf.m_size - 41;
{
static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
    mz_uint8 pnghdr[41] = {
        0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,  // PNG signature
        0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52,  // IHDR length + "IHDR"
        0,    0,    (mz_uint8)(w >> 8), (mz_uint8)w,     // width (big endian)
        0,    0,    (mz_uint8)(h >> 8), (mz_uint8)h,     // height (big endian)
        8,    chans[num_chans], 0, 0, 0,  // depth, color type, comp/filter/interlace
        0,    0,    0,    0,              // IHDR CRC-32 (patched in below)
        (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16),
        (mz_uint8)(*pLen_out >> 8),  (mz_uint8)*pLen_out,  // IDAT length
        0x49, 0x44, 0x41, 0x54};                            // "IDAT"
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
for (i = 0; i < 4; ++i, c <<= 8)
((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
memcpy(out_buf.m_pBuf, pnghdr, 41);
}
// write footer (IDAT CRC-32, followed by IEND chunk)
if (!tdefl_output_buffer_putter(
"\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
*pLen_out = 0;
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
*pLen_out + 4);
for (i = 0; i < 4; ++i, c <<= 8)
(out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
// compute final size of file, grab compressed data buffer and return
*pLen_out += 57;
MZ_FREE(pComp);
return out_buf.m_pBuf;
}
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out) {
  // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we
  // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs were
  // #defined out).
return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
pLen_out, 6, MZ_FALSE);
}
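// Usage sketch (illustrative; pixels/pFile are hypothetical): serialize a
// 32bpp RGBA image and write it to an already opened FILE*:
//   size_t png_len = 0;
//   void *pPNG = tdefl_write_image_to_png_file_in_memory(pixels, w, h, 4,
//                                                        &png_len);
//   if (pPNG) { fwrite(pPNG, 1, png_len, pFile); MZ_FREE(pPNG); }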
// ------------------- .ZIP archive reading
#ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
FILE *pFile = NULL;
fopen_s(&pFile, pFilename, pMode);
return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
FILE *pFile = NULL;
if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
// ZIP archive identifiers and record sizes
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
// Central directory header record offsets
MZ_ZIP_CDH_SIG_OFS = 0,
MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
MZ_ZIP_CDH_METHOD_OFS = 10,
MZ_ZIP_CDH_FILE_TIME_OFS = 12,
MZ_ZIP_CDH_FILE_DATE_OFS = 14,
MZ_ZIP_CDH_CRC32_OFS = 16,
MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
MZ_ZIP_CDH_DISK_START_OFS = 34,
MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
// Local directory header offsets
MZ_ZIP_LDH_SIG_OFS = 0,
MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
MZ_ZIP_LDH_METHOD_OFS = 8,
MZ_ZIP_LDH_FILE_TIME_OFS = 10,
MZ_ZIP_LDH_FILE_DATE_OFS = 12,
MZ_ZIP_LDH_CRC32_OFS = 14,
MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
// End of central directory offsets
MZ_ZIP_ECDH_SIG_OFS = 0,
MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
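// Example: with p pointing at a raw central directory header, a field such as
// the compressed size is read portably (no structs, no alignment or endian
// assumptions) as:
//   mz_uint comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);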
typedef struct {
void *m_p;
size_t m_size, m_capacity;
mz_uint m_element_size;
} mz_zip_array;
struct mz_zip_internal_state_tag {
mz_zip_array m_central_dir;
mz_zip_array m_central_dir_offsets;
mz_zip_array m_sorted_central_dir_offsets;
MZ_FILE *m_pFile;
void *m_pMem;
size_t m_mem_size;
size_t m_mem_capacity;
};
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
(array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
((element_type *)((array_ptr)->m_p))[index]
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
mz_zip_array *pArray) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
memset(pArray, 0, sizeof(mz_zip_array));
}
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t min_new_capacity,
mz_uint growing) {
void *pNew_p;
size_t new_capacity = min_new_capacity;
MZ_ASSERT(pArray->m_element_size);
if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
if (growing) {
new_capacity = MZ_MAX(1, pArray->m_capacity);
while (new_capacity < min_new_capacity) new_capacity *= 2;
}
if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
pArray->m_element_size, new_capacity)))
return MZ_FALSE;
pArray->m_p = pNew_p;
pArray->m_capacity = new_capacity;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_capacity,
mz_uint growing) {
if (new_capacity > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
return MZ_FALSE;
}
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_size,
mz_uint growing) {
if (new_size > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
return MZ_FALSE;
}
pArray->m_size = new_size;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t n) {
return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
mz_zip_array *pArray,
const void *pElements,
size_t n) {
size_t orig_size = pArray->m_size;
if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
return MZ_FALSE;
memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
pElements, n * pArray->m_element_size);
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
struct tm tm;
memset(&tm, 0, sizeof(tm));
tm.tm_isdst = -1;
tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
tm.tm_mon = ((dos_date >> 5) & 15) - 1;
tm.tm_mday = dos_date & 31;
tm.tm_hour = (dos_time >> 11) & 31;
tm.tm_min = (dos_time >> 5) & 63;
tm.tm_sec = (dos_time << 1) & 62;
return mktime(&tm);
}
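// For reference, the DOS bit layout decoded above (and produced by
// mz_zip_time_to_dos_time below) is:
//   dos_date = (year - 1980) << 9 | month << 5 | day
//   dos_time = hour << 11 | minute << 5 | (seconds / 2)
// e.g. 2019-07-14 15:30:10 packs to dos_date = 0x4EEE, dos_time = 0x7BC5.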
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
struct tm tm_struct;
struct tm *tm = &tm_struct;
errno_t err = localtime_s(tm, &time);
if (err) {
*pDOS_date = 0;
*pDOS_time = 0;
return;
}
#else
struct tm *tm = localtime(&time);
#endif
*pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
((tm->tm_sec) >> 1));
*pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif
#ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
(void)pFilename;
*pDOS_date = *pDOS_time = 0;
#else
struct MZ_FILE_STAT_STRUCT file_stat;
// On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
// bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
time_t modified_time) {
struct utimbuf t;
t.actime = access_time;
t.modtime = modified_time;
return !utime(pFilename, &t);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
mz_uint32 flags) {
(void)flags;
if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_READING;
pZip->m_archive_size = 0;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets,
mz_uint l_index, mz_uint r_index) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (l_len < r_len) : (l < r);
}
#define MZ_SWAP_UINT32(a, b) \
do { \
mz_uint32 t = a; \
a = b; \
b = t; \
} \
MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
mz_zip_archive *pZip) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
int start = (size - 2) >> 1, end;
while (start >= 0) {
int child, root = start;
for (;;) {
if ((child = (root << 1) + 1) >= size) break;
child +=
(((child + 1) < size) &&
(mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1])));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
start--;
}
end = size - 1;
while (end > 0) {
int child, root = 0;
MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
for (;;) {
if ((child = (root << 1) + 1) >= end) break;
child +=
(((child + 1) < end) &&
mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1]));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
end--;
}
}
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
mz_uint32 flags) {
mz_uint cdir_size, num_this_disk, cdir_disk_index;
mz_uint64 cdir_ofs;
mz_int64 cur_file_ofs;
const mz_uint8 *p;
mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
mz_bool sort_central_dir =
((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
  // Basic sanity check: reject files too small to contain an end of central
  // directory record.
if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
// Find the end of central directory record by scanning the file from the end
// towards the beginning.
cur_file_ofs =
MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
for (;;) {
int i,
n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
return MZ_FALSE;
for (i = n - 4; i >= 0; --i)
if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
if (i >= 0) {
cur_file_ofs += i;
break;
}
if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
(0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
return MZ_FALSE;
cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
}
// Read and verify the end of central directory record.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
((pZip->m_total_files =
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
return MZ_FALSE;
num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
if (((num_this_disk | cdir_disk_index) != 0) &&
((num_this_disk != 1) || (cdir_disk_index != 1)))
return MZ_FALSE;
if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;
pZip->m_central_directory_file_ofs = cdir_ofs;
if (pZip->m_total_files) {
mz_uint i, n;
// Read the entire central directory into a heap block, and allocate another
// heap block to hold the unsorted central dir file record offsets, and
// another to hold the sorted indices.
if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
MZ_FALSE)) ||
(!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
pZip->m_total_files, MZ_FALSE)))
return MZ_FALSE;
if (sort_central_dir) {
if (!mz_zip_array_resize(pZip,
&pZip->m_pState->m_sorted_central_dir_offsets,
pZip->m_total_files, MZ_FALSE))
return MZ_FALSE;
}
if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
pZip->m_pState->m_central_dir.m_p,
cdir_size) != cdir_size)
return MZ_FALSE;
// Now create an index into the central directory file records, do some
// basic sanity checking on each record, and check for zip64 entries (which
// are not yet supported).
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
mz_uint total_header_size, comp_size, decomp_size, disk_index;
if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
(MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
return MZ_FALSE;
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
i) =
(mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
if (sort_central_dir)
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
mz_uint32, i) = i;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
(decomp_size != comp_size)) ||
(decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
(comp_size == 0xFFFFFFFF))
return MZ_FALSE;
disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
n)
return MZ_FALSE;
n -= total_header_size;
p += total_header_size;
}
}
if (sort_central_dir)
mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
return MZ_TRUE;
}
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags) {
if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE;
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
size_t s = (file_ofs >= pZip->m_archive_size)
? 0
: (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
return s;
}
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags) {
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
pZip->m_pRead = mz_zip_mem_read_func;
pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
pZip->m_pState->m_pMem = (void *)pMem;
#endif
pZip->m_pState->m_mem_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
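// Usage sketch (illustrative; pZip_data/zip_size are hypothetical): open an
// archive that is already resident in memory:
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (mz_zip_reader_init_mem(&zip, pZip_data, zip_size, 0)) {
//     mz_uint n = mz_zip_reader_get_num_files(&zip);
//     /* ... iterate, locate, extract ... */
//     mz_zip_reader_end(&zip);
//   }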
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags) {
mz_uint64 file_size;
MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
if (!pFile) return MZ_FALSE;
if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
file_size = MZ_FTELL64(pFile);
if (!mz_zip_reader_init_internal(pZip, flags)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
pZip->m_pRead = mz_zip_file_read_func;
pZip->m_pIO_opaque = pZip;
pZip->m_pState->m_pFile = pFile;
pZip->m_archive_size = file_size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
return pZip ? pZip->m_total_files : 0;
}
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
mz_zip_archive *pZip, mz_uint file_index) {
if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return NULL;
return &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
}
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint m_bit_flag;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
return (m_bit_flag & 1);
}
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint filename_len, external_attr;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
// First see if the filename ends with a '/' character.
filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_len) {
if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
return MZ_TRUE;
}
// Bugfix: This code was also checking if the internal attribute was non-zero,
// which wasn't correct.
// Most/all zip writers (hopefully) set DOS file/directory attributes in the
// low 16-bits, so check for the DOS directory flag and ignore the source OS
// ID in the created by field.
// FIXME: Remove this check? Is it necessary - we already check the filename.
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
if ((external_attr & 0x10) != 0) return MZ_TRUE;
return MZ_FALSE;
}
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if ((!p) || (!pStat)) return MZ_FALSE;
// Unpack the central directory record.
pStat->m_file_index = file_index;
pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
pStat->m_time =
mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
// Copy as much of the filename and comment as possible.
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pStat->m_filename[n] = '\0';
n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
pStat->m_comment_size = n;
memcpy(pStat->m_comment,
p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
n);
pStat->m_comment[n] = '\0';
return MZ_TRUE;
}
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) {
if (filename_buf_size) pFilename[0] = '\0';
return 0;
}
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_buf_size) {
n = MZ_MIN(n, filename_buf_size - 1);
memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pFilename[n] = '\0';
}
return n + 1;
}
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
const char *pB,
mz_uint len,
mz_uint flags) {
mz_uint i;
if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len);
for (i = 0; i < len; ++i)
if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE;
return MZ_TRUE;
}
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
mz_uint r_len) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
const mz_uint filename_len = (mz_uint)strlen(pFilename);
int l = 0, h = size - 1;
while (l <= h) {
int m = (l + h) >> 1, file_index = pIndices[m],
comp =
mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
file_index, pFilename, filename_len);
if (!comp)
return file_index;
else if (comp < 0)
l = m + 1;
else
h = m - 1;
}
return -1;
}
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags) {
mz_uint file_index;
size_t name_len, comment_len;
if ((!pZip) || (!pZip->m_pState) || (!pName) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return -1;
if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
(!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
return mz_zip_reader_locate_file_binary_search(pZip, pName);
name_len = strlen(pName);
if (name_len > 0xFFFF) return -1;
comment_len = pComment ? strlen(pComment) : 0;
if (comment_len > 0xFFFF) return -1;
for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
const char *pFilename =
(const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
if (filename_len < name_len) continue;
if (comment_len) {
mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
file_comment_len =
MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
const char *pFile_comment = pFilename + filename_len + file_extra_len;
if ((file_comment_len != comment_len) ||
(!mz_zip_reader_string_equal(pComment, pFile_comment,
file_comment_len, flags)))
continue;
}
if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
int ofs = filename_len - 1;
do {
if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
(pFilename[ofs] == ':'))
break;
} while (--ofs >= 0);
ofs++;
pFilename += ofs;
filename_len -= ofs;
}
if ((filename_len == name_len) &&
(mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
return file_index;
}
return -1;
}
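// Usage sketch (illustrative): locate an entry by name (case insensitive by
// default) and query its central directory record:
//   mz_zip_archive_file_stat st;
//   int idx = mz_zip_reader_locate_file(&zip, "dir/file.txt", NULL, 0);
//   if ((idx >= 0) && mz_zip_reader_file_stat(&zip, idx, &st))
//     /* st.m_uncomp_size, st.m_crc32, ... */;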
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size) {
int status = TINFL_STATUS_DONE;
mz_uint64 needed_size, cur_file_ofs, comp_remaining,
out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
mz_zip_archive_file_stat file_stat;
void *pRead_buf;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
tinfl_decompressor inflator;
if ((buf_size) && (!pBuf)) return MZ_FALSE;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Ensure supplied output buffer is large enough.
needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
: file_stat.m_uncomp_size;
if (buf_size < needed_size) return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
(size_t)needed_size) != needed_size)
return MZ_FALSE;
return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
}
// Decompress the file either directly from memory or from a file input
// buffer.
tinfl_init(&inflator);
if (pZip->m_pState->m_pMem) {
// Read directly from the archive in memory.
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else if (pUser_read_buf) {
// Use a user-provided read buffer.
if (!user_read_buf_size) return MZ_FALSE;
pRead_buf = (mz_uint8 *)pUser_read_buf;
read_buf_size = user_read_buf_size;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
} else {
// Temporarily allocate a read buffer.
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
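// Note: the "(0, ...)" comma expression below exists solely to silence MSVC's
// "conditional expression is constant" warning; the test itself guards
// against size_t truncation when size_t is 32 bits.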
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(read_buf_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
return MZ_FALSE;
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
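// Streaming decode loop: refill the read buffer as tinfl consumes it. The
// caller's output buffer doubles as the LZ dictionary
// (TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF), so no separate sliding window
// is needed on this path.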
do {
size_t in_buf_size,
out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
(comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
out_buf_ofs += out_buf_size;
} while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
if (status == TINFL_STATUS_DONE) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, pUser_read_buf,
user_read_buf_size);
}
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags) {
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, NULL, 0);
}
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags) {
return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
buf_size, flags, NULL, 0);
}
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags) {
mz_uint64 comp_size, uncomp_size, alloc_size;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
void *pBuf;
if (pSize) *pSize = 0;
if (!p) return NULL;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
return NULL;
if (NULL ==
(pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
return NULL;
if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
flags)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return NULL;
}
if (pSize) *pSize = (size_t)alloc_size;
return pBuf;
}
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) {
if (pSize) *pSize = 0;
return NULL;
}
return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
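// Unlike the extract-to-memory path above, the callback variant has no
// contiguous output buffer, so it decompresses through a circular
// TINFL_LZ_DICT_SIZE (32KB) window and flushes each filled span to the
// caller's write callback.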
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int status = TINFL_STATUS_DONE;
mz_uint file_crc32 = MZ_CRC32_INIT;
mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
out_buf_ofs = 0, cur_file_ofs;
mz_zip_archive_file_stat file_stat;
void *pRead_buf = NULL;
void *pWrite_buf = NULL;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
// Decompress the file either directly from memory or from a file input
// buffer.
if (pZip->m_pState->m_pMem) {
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else {
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pState->m_pMem) {
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#endif
return MZ_FALSE;
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
status = TINFL_STATUS_FAILED;
else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
(size_t)file_stat.m_comp_size);
cur_file_ofs += file_stat.m_comp_size;
out_buf_ofs += file_stat.m_comp_size;
comp_remaining = 0;
} else {
while (comp_remaining) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 = (mz_uint32)mz_crc32(
file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
out_buf_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
}
}
} else {
tinfl_decompressor inflator;
tinfl_init(&inflator);
if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
TINFL_LZ_DICT_SIZE)))
status = TINFL_STATUS_FAILED;
else {
do {
mz_uint8 *pWrite_buf_cur =
(mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
size_t in_buf_size,
out_buf_size =
TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
if (out_buf_size) {
if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
out_buf_size) {
status = TINFL_STATUS_FAILED;
break;
}
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
status = TINFL_STATUS_FAILED;
break;
}
}
} while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
(status == TINFL_STATUS_HAS_MORE_OUTPUT));
}
}
if ((status == TINFL_STATUS_DONE) &&
(!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(file_crc32 != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque,
flags);
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
const void *pBuf, size_t n) {
(void)ofs;
return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename,
mz_uint flags) {
mz_bool status;
mz_zip_archive_file_stat file_stat;
MZ_FILE *pFile;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
pFile = MZ_FOPEN(pDst_filename, "wb");
if (!pFile) return MZ_FALSE;
status = mz_zip_reader_extract_to_callback(
pZip, file_index, mz_zip_file_write_callback, pFile, flags);
if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
if (status)
mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
return status;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
if (pZip->m_pState) {
mz_zip_internal_state *pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
}
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags) {
int file_index =
mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}
#endif
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
}
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
p[2] = (mz_uint8)(v >> 16);
p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
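// All multi-byte fields in ZIP headers are little-endian on disk; these
// helpers store them byte-by-byte so the output is correct regardless of host
// endianness or alignment.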
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (pZip->m_file_offset_alignment) {
// Ensure the user-specified file offset alignment is a power of 2.
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
return MZ_FALSE;
}
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_archive_size = existing_size;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
if ((!n) ||
((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
if ((!n) ||
((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
return 0;
if (new_size > pState->m_mem_capacity) {
void *pNew_block;
size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
while (new_capacity < new_size) new_capacity *= 2;
if (NULL == (pNew_block = pZip->m_pRealloc(
pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
return 0;
pState->m_pMem = pNew_block;
pState->m_mem_capacity = new_capacity;
}
memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
pState->m_mem_size = (size_t)new_size;
return n;
}
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size) {
pZip->m_pWrite = mz_zip_heap_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
size_to_reserve_at_beginning))) {
if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_mem_capacity = initial_allocation_size;
}
return MZ_TRUE;
}
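// Usage sketch (illustrative; "hello.txt" is a hypothetical entry name, and
// ownership of pBuf passes to the caller, who must release it with the
// archive's free callback - free() with the default allocators):
//   mz_zip_archive zip;
//   MZ_CLEAR_OBJ(zip);
//   mz_zip_writer_init_heap(&zip, 0, 64 * 1024);
//   mz_zip_writer_add_mem(&zip, "hello.txt", "hi", 2, MZ_DEFAULT_LEVEL);
//   void *pBuf = NULL;
//   size_t size = 0;
//   mz_zip_writer_finalize_heap_archive(&zip, &pBuf, &size);  // defined below
//   mz_zip_writer_end(&zip);
//   /* ... use pBuf/size ..., then free(pBuf) */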
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning) {
MZ_FILE *pFile;
pZip->m_pWrite = mz_zip_file_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_pFile = pFile;
if (size_to_reserve_at_beginning) {
mz_uint64 cur_ofs = 0;
char buf[4096];
MZ_CLEAR_OBJ(buf);
do {
size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
cur_ofs += n;
size_to_reserve_at_beginning -= n;
} while (size_to_reserve_at_beginning);
}
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
// No sense in trying to write to an archive that's already at the supported
// max size.
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
pState = pZip->m_pState;
if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
(void)pFilename;  // unused when stdio support is compiled out
return MZ_FALSE;
#else
// Archive is being read from stdio - try to reopen as writable.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
if (!pFilename) return MZ_FALSE;
pZip->m_pWrite = mz_zip_file_write_func;
if (NULL ==
(pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
// The mz_zip_archive is now in a bogus state because pState->m_pFile is
// NULL, so just close it.
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
#endif // #ifdef MINIZ_NO_STDIO
} else if (pState->m_pMem) {
// Archive lives in a memory block. Assume the block is heap-allocated and
// can be resized via the realloc callback.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
pState->m_mem_capacity = pState->m_mem_size;
pZip->m_pWrite = mz_zip_heap_write_func;
}
// Archive is being read via a user-provided read function - make sure the
// user has specified a write function too.
else if (!pZip->m_pWrite)
return MZ_FALSE;
// Start writing new files at the archive's current central directory
// location.
pZip->m_archive_size = pZip->m_central_directory_file_ofs;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_central_directory_file_ofs = 0;
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags) {
return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
level_and_flags, 0, 0);
}
typedef struct {
mz_zip_archive *m_pZip;
mz_uint64 m_cur_archive_file_ofs;
mz_uint64 m_comp_size;
} mz_zip_writer_add_state;
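// The state above carries the write cursor and the running compressed size
// while tdefl streams its output through mz_zip_writer_add_put_buf_callback
// (below) directly into the archive, so no intermediate compressed buffer is
// ever materialized.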
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
void *pUser) {
mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
pState->m_cur_archive_file_ofs, pBuf,
len) != len)
return MZ_FALSE;
pState->m_cur_archive_file_ofs += len;
pState->m_comp_size += len;
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_local_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_central_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_add_to_central_dir(
mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
const void *pExtra, mz_uint16 extra_size, const void *pComment,
mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
mz_uint32 ext_attributes) {
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
size_t orig_central_dir_size = pState->m_central_dir.m_size;
mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
// No zip64 support yet
if ((local_header_ofs > 0xFFFFFFFF) ||
(((mz_uint64)pState->m_central_dir.m_size +
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
comment_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_central_dir_header(
pZip, central_dir_header, filename_size, extra_size, comment_size,
uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
dos_date, local_header_ofs, ext_attributes))
return MZ_FALSE;
if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
filename_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
extra_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
comment_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
&central_dir_ofs, 1))) {
// Try to push the central directory array back into its original state.
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
return MZ_TRUE;
}
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
// Basic ZIP archive filename validity checks: Valid filenames cannot start
// with a forward slash, cannot contain a drive letter, and cannot use
// DOS-style backward slashes.
if (*pArchive_name == '/') return MZ_FALSE;
while (*pArchive_name) {
if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE;
pArchive_name++;
}
return MZ_TRUE;
}
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
mz_zip_archive *pZip) {
mz_uint32 n;
if (!pZip->m_file_offset_alignment) return 0;
n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
return (pZip->m_file_offset_alignment - n) &
(pZip->m_file_offset_alignment - 1);
}
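// Worked example: with a 64-byte alignment and an archive size of 100,
// n = 100 & 63 = 36 and the function returns (64 - 36) & 63 = 28, placing the
// next local header at offset 128. The alignment must be a power of two
// (verified in mz_zip_writer_init), which is what makes the mask arithmetic
// valid.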
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
mz_uint64 cur_file_ofs, mz_uint32 n) {
char buf[4096];
memset(buf, 0, MZ_MIN(sizeof(buf), n));
while (n) {
mz_uint32 s = MZ_MIN(sizeof(buf), n);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
return MZ_FALSE;
cur_file_ofs += s;
n -= s;
}
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32) {
mz_uint16 method = 0, dos_time = 0, dos_date = 0;
mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
tdefl_compressor *pComp = NULL;
mz_bool store_data_uncompressed;
mz_zip_internal_state *pState;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
store_data_uncompressed =
((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
(!pArchive_name) || ((comment_size) && (!pComment)) ||
(pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
pState = pZip->m_pState;
if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
return MZ_FALSE;
// No zip64 support yet
if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
{
time_t cur_time;
time(&cur_time);
mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
}
#endif // #ifndef MINIZ_NO_TIME
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
// Set DOS Subdirectory attribute bit.
ext_attributes |= 0x10;
// Subdirectories cannot contain data.
if ((buf_size) || (uncomp_size)) return MZ_FALSE;
}
// Try to do any allocations before writing to the archive, so if an
// allocation fails the file remains unmodified. (A good idea if we're doing
// an in-place modification.)
if ((!mz_zip_array_ensure_room(
pZip, &pState->m_central_dir,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
(!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
return MZ_FALSE;
if ((!store_data_uncompressed) && (buf_size)) {
if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
return MZ_FALSE;
}
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
uncomp_crc32 =
(mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
uncomp_size = buf_size;
if (uncomp_size <= 3) {
level = 0;
store_data_uncompressed = MZ_TRUE;
}
}
if (store_data_uncompressed) {
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
buf_size) != buf_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += buf_size;
comp_size = buf_size;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED;
} else if (buf_size) {
mz_zip_writer_add_state state;
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) ||
(tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
TDEFL_STATUS_DONE)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pComp = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
MZ_FILE *pSrc_file = NULL;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
return MZ_FALSE;
pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
if (!pSrc_file) return MZ_FALSE;
MZ_FSEEK64(pSrc_file, 0, SEEK_END);
uncomp_size = MZ_FTELL64(pSrc_file);
MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
if (uncomp_size > 0xFFFFFFFF) {
// No zip64 support yet
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (uncomp_size <= 3) level = 0;
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (uncomp_size) {
mz_uint64 uncomp_remaining = uncomp_size;
void *pRead_buf =
pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
if (!pRead_buf) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (!level) {
while (uncomp_remaining) {
mz_uint n =
(mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
(pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
n) != n)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
uncomp_crc32 =
(mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
uncomp_remaining -= n;
cur_archive_file_ofs += n;
}
comp_size = uncomp_size;
} else {
mz_bool result = MZ_FALSE;
mz_zip_writer_add_state state;
tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
if (!pComp) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
for (;;) {
size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
tdefl_status status;
if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
break;
uncomp_crc32 = (mz_uint32)mz_crc32(
uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
uncomp_remaining -= in_buf_size;
status = tdefl_compress_buffer(
pComp, pRead_buf, in_buf_size,
uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
if (status == TDEFL_STATUS_DONE) {
result = MZ_TRUE;
break;
} else if (status != TDEFL_STATUS_OKAY)
break;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
if (!result) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
}
MZ_FCLOSE(pSrc_file);
pSrc_file = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index) {
mz_uint n, bit_flags, num_alignment_padding_bytes;
mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
size_t orig_central_dir_size;
mz_zip_internal_state *pState;
void *pBuf;
const mz_uint8 *pSrc_central_header;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
if (NULL ==
(pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
return MZ_FALSE;
pState = pZip->m_pState;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
0xFFFFFFFF))
return MZ_FALSE;
cur_src_file_ofs =
MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
cur_dst_file_ofs = pZip->m_archive_size;
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
num_alignment_padding_bytes))
return MZ_FALSE;
cur_dst_file_ofs += num_alignment_padding_bytes;
local_dir_header_ofs = cur_dst_file_ofs;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
comp_bytes_remaining =
n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
if (NULL == (pBuf = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1,
(size_t)MZ_MAX(sizeof(mz_uint32) * 4,
MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
comp_bytes_remaining)))))
return MZ_FALSE;
while (comp_bytes_remaining) {
n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_dst_file_ofs += n;
comp_bytes_remaining -= n;
}
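// If bit 3 of the general-purpose flag is set, the CRC-32 and sizes were
// written after the compressed data in a trailing data descriptor. The
// descriptor may optionally begin with the signature 0x08074b50, hence the
// 4-vs-3 dword copy below.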
bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
if (bit_flags & 8) {
// Copy data descriptor
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
cur_dst_file_ofs += n;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
// no zip64 support yet
if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE;
orig_central_dir_size = pState->m_central_dir.m_size;
memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
local_dir_header_ofs);
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
return MZ_FALSE;
n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
if (!mz_zip_array_push_back(
pZip, &pState->m_central_dir,
pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE;
n = (mz_uint32)orig_central_dir_size;
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
pZip->m_total_files++;
pZip->m_archive_size = cur_dst_file_ofs;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_uint64 central_dir_ofs, central_dir_size;
mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
pState = pZip->m_pState;
// no zip64 support yet
if ((pZip->m_total_files > 0xFFFF) ||
((pZip->m_archive_size + pState->m_central_dir.m_size +
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
central_dir_ofs = 0;
central_dir_size = 0;
if (pZip->m_total_files) {
// Write central directory
central_dir_ofs = pZip->m_archive_size;
central_dir_size = pState->m_central_dir.m_size;
pZip->m_central_directory_file_ofs = central_dir_ofs;
if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
pState->m_central_dir.m_p,
(size_t)central_dir_size) != central_dir_size)
return MZ_FALSE;
pZip->m_archive_size += central_dir_size;
}
// Write end of central directory record
MZ_CLEAR_OBJ(hdr);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
pZip->m_total_files);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
sizeof(hdr)) != sizeof(hdr))
return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_archive_size += sizeof(hdr);
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize) {
if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE;
if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE;
if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE;
*pBuf = pZip->m_pState->m_pMem;
*pSize = pZip->m_pState->m_mem_size;
pZip->m_pState->m_pMem = NULL;
pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0;
return MZ_TRUE;
}
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_bool status = MZ_TRUE;
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
return MZ_FALSE;
pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
pState->m_pMem = NULL;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return status;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_bool status, created_new_archive = MZ_FALSE;
mz_zip_archive zip_archive;
struct MZ_FILE_STAT_STRUCT file_stat;
MZ_CLEAR_OBJ(zip_archive);
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
((comment_size) && (!pComment)) ||
((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
// Create a new archive.
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
return MZ_FALSE;
created_new_archive = MZ_TRUE;
} else {
// Append to an existing archive.
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return MZ_FALSE;
if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
mz_zip_reader_end(&zip_archive);
return MZ_FALSE;
}
}
status =
mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
pComment, comment_size, level_and_flags, 0, 0);
// Always finalize, even if adding failed for some reason, so we have a valid
// central directory. (This may not always succeed, but we can try.)
if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
if ((!status) && (created_new_archive)) {
// It's a new archive and something went wrong, so just delete it.
int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
(void)ignoredStatus;
}
return status;
}
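// Usage sketch (illustrative file/entry names): append one entry to a .zip on
// disk, creating the archive if it does not exist yet:
//   mz_zip_add_mem_to_archive_file_in_place("out.zip", "log.txt", "data", 4,
//                                           NULL, 0, MZ_DEFAULT_LEVEL);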
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint flags) {
int file_index;
mz_zip_archive zip_archive;
void *p = NULL;
if (pSize) *pSize = 0;
if ((!pZip_filename) || (!pArchive_name)) return NULL;
MZ_CLEAR_OBJ(zip_archive);
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return NULL;
if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL,
flags)) >= 0)
p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
mz_zip_reader_end(&zip_archive);
return p;
}
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
}
#else
// Reuse MINIZ_LITTLE_ENDIAN macro
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif // TINYEXR_USE_MINIZ
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
static const int kEXRVersionSize = 8;
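// EXR files store all scalar values little-endian, so the swap helpers below
// are no-ops on little-endian hosts and byte-reverse on big-endian ones.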
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned short tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[1];
dst[1] = src[0];
#endif
}
static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
tinyexr::tinyexr_uint64 tmp = (*val);
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[7];
dst[1] = src[6];
dst[2] = src[5];
dst[3] = src[4];
dst[4] = src[3];
dst[5] = src[2];
dst[6] = src[1];
dst[7] = src[0];
#endif
}
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
union FP32 {
unsigned int u;
float f;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 23;
unsigned int Exponent : 8;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 8;
unsigned int Mantissa : 23;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
union FP16 {
unsigned short u;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 10;
unsigned int Exponent : 5;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 5;
unsigned int Mantissa : 10;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
static FP32 half_to_float(FP16 h) {
static const FP32 magic = {113 << 23};
static const unsigned int shifted_exp = 0x7c00
<< 13; // exponent mask after shift
FP32 o;
o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits
unsigned int exp_ = shifted_exp & o.u; // just the exponent
o.u += (127 - 15) << 23; // exponent adjust
// handle exponent special cases
if (exp_ == shifted_exp) // Inf/NaN?
o.u += (128 - 16) << 23; // extra exp adjust
else if (exp_ == 0) // Zero/Denormal?
{
o.u += 1 << 23; // extra exp adjust
o.f -= magic.f; // renormalize
}
o.u |= (h.u & 0x8000U) << 16U; // sign bit
return o;
}
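// Sanity check (worked example): the half bit pattern 0x3C00 (sign 0,
// exponent 15, mantissa 0) shifts to 0x7800000, gains the (127 - 15) << 23
// exponent rebias, and comes out as o.u = 0x3F800000, i.e. 1.0f:
//   FP16 h; h.u = 0x3C00;
//   assert(half_to_float(h).f == 1.0f);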
static FP16 float_to_half_full(FP32 f) {
FP16 o = {0};
// Based on ISPC reference code (with minor modifications)
if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow)
o.s.Exponent = 0;
else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set)
{
o.s.Exponent = 31;
o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf
} else // Normalized number
{
// Exponent unbias the single, then bias the halfp
int newexp = f.s.Exponent - 127 + 15;
if (newexp >= 31) // Overflow, return signed infinity
o.s.Exponent = 31;
else if (newexp <= 0) // Underflow
{
if ((14 - newexp) <= 24) // Mantissa might be non-zero
{
unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit
o.s.Mantissa = mant >> (14 - newexp);
if ((mant >> (13 - newexp)) & 1) // Check for rounding
o.u++; // Round, might overflow into exp bit, but this is OK
}
} else {
o.s.Exponent = static_cast<unsigned int>(newexp);
o.s.Mantissa = f.s.Mantissa >> 13;
if (f.s.Mantissa & 0x1000) // Check for rounding
o.u++; // Round, might overflow to inf, this is OK
}
}
o.s.Sign = f.s.Sign;
return o;
}
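// Round-trip example: 1.0f has Exponent 127 and Mantissa 0, so
// newexp = 127 - 127 + 15 = 15 and the function returns u = 15 << 10 = 0x3C00,
// the inverse of the half_to_float example above.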
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RANDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
static const char *ReadString(std::string *s, const char *ptr) {
// Read until the terminating NUL ('\0').
const char *p = ptr;
const char *q = ptr;
while ((*q) != 0) q++;
(*s) = std::string(p, q);
return q + 1; // skip '\0'
}
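// EXR attribute layout parsed below: a NUL-terminated attribute name, a
// NUL-terminated type string, a little-endian uint32 payload size, then the
// payload bytes themselves.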
static bool ReadAttribute(std::string *name, std::string *type,
std::vector<unsigned char> *data, size_t *marker_size,
const char *marker, size_t size) {
size_t name_len = strnlen(marker, size);
if (name_len == size) {
// String does not have a terminating character.
return false;
}
*name = std::string(marker, name_len);
marker += name_len + 1;
size -= name_len + 1;
size_t type_len = strnlen(marker, size);
if (type_len == size) {
return false;
}
*type = std::string(marker, type_len);
marker += type_len + 1;
size -= type_len + 1;
if (size < sizeof(uint32_t)) {
return false;
}
uint32_t data_len;
memcpy(&data_len, marker, sizeof(uint32_t));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
if (size < data_len) {
return false;
}
data->resize(static_cast<size_t>(data_len));
memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
return true;
}
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
const char *name, const char *type,
const unsigned char *data, int len) {
out->insert(out->end(), name, name + strlen(name) + 1);
out->insert(out->end(), type, type + strlen(type) + 1);
int outLen = len;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen));
out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
out->insert(out->end(), data, data + len);
}
typedef struct {
std::string name; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} ChannelInfo;
typedef struct {
std::vector<tinyexr::ChannelInfo> channels;
std::vector<EXRAttribute> attributes;
int data_window[4];
int line_order;
int display_window[4];
float screen_window_center[2];
float screen_window_width;
float pixel_aspect_ratio;
int chunk_count;
// Tiled format
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
unsigned int header_len;
int compression_type;
void clear() {
channels.clear();
attributes.clear();
data_window[0] = 0;
data_window[1] = 0;
data_window[2] = 0;
data_window[3] = 0;
line_order = 0;
display_window[0] = 0;
display_window[1] = 0;
display_window[2] = 0;
display_window[3] = 0;
screen_window_center[0] = 0.0f;
screen_window_center[1] = 0.0f;
screen_window_width = 0.0f;
pixel_aspect_ratio = 0.0f;
chunk_count = 0;
// Tiled format
tile_size_x = 0;
tile_size_y = 0;
tile_level_mode = 0;
tile_rounding_mode = 0;
header_len = 0;
compression_type = 0;
}
} HeaderInfo;
static void ReadChannelInfo(std::vector<ChannelInfo> &channels,
const std::vector<unsigned char> &data) {
const char *p = reinterpret_cast<const char *>(&data.at(0));
for (;;) {
if ((*p) == 0) {
break;
}
ChannelInfo info;
p = ReadString(&info.name, p);
memcpy(&info.pixel_type, p, sizeof(int));
p += 4;
info.p_linear = static_cast<unsigned char>(p[0]); // uchar
p += 1 + 3; // reserved: uchar[3]
memcpy(&info.x_sampling, p, sizeof(int)); // int
p += 4;
memcpy(&info.y_sampling, p, sizeof(int)); // int
p += 4;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling));
channels.push_back(info);
}
}
static void WriteChannelInfo(std::vector<unsigned char> &data,
const std::vector<ChannelInfo> &channels) {
size_t sz = 0;
// Calculate total size.
for (size_t c = 0; c < channels.size(); c++) {
sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0
sz += 16; // 4 * int
}
data.resize(sz + 1);
unsigned char *p = &data.at(0);
for (size_t c = 0; c < channels.size(); c++) {
memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
p += strlen(channels[c].name.c_str());
(*p) = '\0';
p++;
int pixel_type = channels[c].pixel_type;
int x_sampling = channels[c].x_sampling;
int y_sampling = channels[c].y_sampling;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));
memcpy(p, &pixel_type, sizeof(int));
p += sizeof(int);
    (*p) = channels[c].p_linear;
    p += 4;  // 1 byte p_linear + 3 reserved bytes
memcpy(p, &x_sampling, sizeof(int));
p += sizeof(int);
memcpy(p, &y_sampling, sizeof(int));
p += sizeof(int);
}
(*p) = '\0';
}
static void CompressZip(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
  // Apply the EXR-specific preprocessing before deflate (adapted from
  // OpenEXR's ImfZipCompressor.cpp).
//
//
// Reorder the pixel data.
//
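  // Example of the reorder: bytes b0 b1 b2 b3 b4 are split into two halves,
  // t1 = {b0, b2, b4} and t2 = {b1, b3} (even-indexed bytes first, then
  // odd-indexed bytes). For HALF pixel data this groups low bytes and high
  // bytes together, which tends to deflate better.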
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
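  // The predictor replaces each byte (except the first) with its difference
  // from the previous byte, biased by 128 + 256 and truncated to 8 bits.
  // Worked example: for input {10, 12, ...} the second byte becomes
  // (12 - 10 + 384) & 0xff = 130, and DecompressZip() inverts it as
  // (10 + 130 - 128) & 0xff = 12.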
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
#if TINYEXR_USE_MINIZ
//
// Compress the data using miniz
//
miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
int ret = miniz::mz_compress(
dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
src_size);
assert(ret == miniz::MZ_OK);
(void)ret;
compressedSize = outSize;
#else
uLong outSize = compressBound(static_cast<uLong>(src_size));
int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
src_size);
assert(ret == Z_OK);
compressedSize = outSize;
#endif
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static void DecompressZip(unsigned char *dst,
unsigned long *uncompressed_size /* inout */,
const unsigned char *src, unsigned long src_size) {
if ((*uncompressed_size) == src_size) {
    // Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return;
}
std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
int ret =
miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
assert(ret == miniz::MZ_OK);
(void)ret;
#else
int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
assert(ret == Z_OK);
(void)ret;
#endif
//
  // Apply the EXR-specific postprocessing after inflate (adapted from
  // OpenEXR's ImfZipCompressor.cpp).
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(*uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + (*uncompressed_size);
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning( \
disable : 4267) // 'argument': conversion from '__int64' to 'int',
// possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
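// Stream format: a run of n (n >= MIN_RUN_LENGTH) identical bytes is stored
// as the pair {n - 1, byte}; a stretch of n literal bytes is stored as
// {-n, b0, ..., b(n-1)}. Example: "AAAABCD" encodes as {3, 'A'} followed by
// {-3, 'B', 'C', 'D'}.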
static int rleCompress(int inLength, const char in[], signed char out[]) {
const char *inEnd = in + inLength;
const char *runStart = in;
const char *runEnd = in + 1;
signed char *outWrite = out;
while (runStart < inEnd) {
while (runEnd < inEnd && *runStart == *runEnd &&
runEnd - runStart - 1 < MAX_RUN_LENGTH) {
++runEnd;
}
if (runEnd - runStart >= MIN_RUN_LENGTH) {
//
      // Compressible run
//
      *outWrite++ = static_cast<signed char>(runEnd - runStart - 1);
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
runStart = runEnd;
} else {
//
      // Uncompressible run
//
while (runEnd < inEnd &&
((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
(runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
runEnd - runStart < MAX_RUN_LENGTH) {
++runEnd;
}
      *outWrite++ = static_cast<signed char>(runStart - runEnd);
while (runStart < runEnd) {
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
}
}
++runEnd;
}
return static_cast<int>(outWrite - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
static int rleUncompress(int inLength, int maxLength, const signed char in[],
char out[]) {
char *outStart = out;
while (inLength > 0) {
if (*in < 0) {
int count = -(static_cast<int>(*in++));
inLength -= count + 1;
if (0 > (maxLength -= count)) return 0;
memcpy(out, in, count);
out += count;
in += count;
} else {
int count = *in++;
inLength -= 2;
if (0 > (maxLength -= count + 1)) return 0;
memset(out, *reinterpret_cast<const char *>(in), count + 1);
out += count + 1;
in++;
}
}
return static_cast<int>(out - outStart);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
static void CompressRle(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
  // Apply the EXR-specific preprocessing before run-length encoding
  // (adapted from OpenEXR's ImfRleCompressor.cpp).
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
  // outSize will be at most (src_size * 3) / 2.
int outSize = rleCompress(static_cast<int>(src_size),
reinterpret_cast<const char *>(&tmpBuf.at(0)),
reinterpret_cast<signed char *>(dst));
assert(outSize > 0);
compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static void DecompressRle(unsigned char *dst,
const unsigned long uncompressed_size,
const unsigned char *src, unsigned long src_size) {
if (uncompressed_size == src_size) {
    // Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return;
}
std::vector<unsigned char> tmpBuf(uncompressed_size);
int ret = rleUncompress(static_cast<int>(src_size),
static_cast<int>(uncompressed_size),
reinterpret_cast<const signed char *>(src),
reinterpret_cast<char *>(&tmpBuf.at(0)));
assert(ret == static_cast<int>(uncompressed_size));
(void)ret;
//
  // Apply the EXR-specific postprocessing after run-length decoding
  // (adapted from OpenEXR's ImfRleCompressor.cpp).
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + uncompressed_size;
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
struct PIZChannelData {
unsigned short *start;
unsigned short *end;
int nx;
int ny;
int ys;
int size;
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
short as = static_cast<short>(a);
short bs = static_cast<short>(b);
short ms = (as + bs) >> 1;
short ds = as - bs;
l = static_cast<unsigned short>(ms);
h = static_cast<unsigned short>(ds);
}
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
short ls = static_cast<short>(l);
short hs = static_cast<short>(h);
int hi = hs;
int ai = ls + (hi & 1) + (hi >> 1);
short as = static_cast<short>(ai);
short bs = static_cast<short>(ai - hi);
a = static_cast<unsigned short>(as);
b = static_cast<unsigned short>(bs);
}
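// Roundtrip example for the 14-bit transform: wenc14(10, 4, l, h) yields
// l = (10 + 4) >> 1 = 7 (the average) and h = 10 - 4 = 6 (the difference);
// wdec14(7, 6, a, b) recovers a = 7 + (6 & 1) + (6 >> 1) = 10 and
// b = 10 - 6 = 4.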
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
int ao = (a + A_OFFSET) & MOD_MASK;
int m = ((ao + b) >> 1);
int d = ao - b;
if (d < 0) m = (m + M_OFFSET) & MOD_MASK;
d &= MOD_MASK;
l = static_cast<unsigned short>(m);
h = static_cast<unsigned short>(d);
}
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
int m = l;
int d = h;
int bb = (m - (d >> 1)) & MOD_MASK;
int aa = (d + bb - A_OFFSET) & MOD_MASK;
b = static_cast<unsigned short>(bb);
a = static_cast<unsigned short>(aa);
}
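// The 16-bit variants wrap modulo 2^16 instead of widening. Roundtrip
// example: wenc16(50000, 40000, l, h) yields l = 61384, h = 42768, and
// wdec16(61384, 42768, a, b) recovers a = 50000, b = 40000.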
//
// 2D Wavelet encoding:
//
static void wav2Encode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1; // == 1 << level
int p2 = 2; // == 1 << (level+1)
//
  // Hierarchical loop on smaller dimension n
//
while (p2 <= n) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet encoding
//
if (w14) {
wenc14(*px, *p01, i00, i01);
wenc14(*p10, *p11, i10, i11);
wenc14(i00, i10, *px, *p10);
wenc14(i01, i11, *p01, *p11);
} else {
wenc16(*px, *p01, i00, i01);
wenc16(*p10, *p11, i10, i11);
wenc16(i00, i10, *px, *p10);
wenc16(i01, i11, *p01, *p11);
}
}
//
// Encode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wenc14(*px, *p10, i00, *p10);
else
wenc16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Encode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wenc14(*px, *p01, i00, *p01);
else
wenc16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p = p2;
p2 <<= 1;
}
}
//
// 2D Wavelet decoding:
//
static void wav2Decode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1;
int p2;
//
// Search max level
//
while (p <= n) p <<= 1;
p >>= 1;
p2 = p;
p >>= 1;
//
// Hierarchical loop on smaller dimension n
//
while (p >= 1) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet decoding
//
if (w14) {
wdec14(*px, *p10, i00, i10);
wdec14(*p01, *p11, i01, i11);
wdec14(i00, i01, *px, *p01);
wdec14(i10, i11, *p10, *p11);
} else {
wdec16(*px, *p10, i00, i10);
wdec16(*p01, *p11, i01, i11);
wdec16(i00, i01, *px, *p01);
wdec16(i10, i11, *p10, *p11);
}
}
//
// Decode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wdec14(*px, *p10, i00, *p10);
else
wdec16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Decode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wdec14(*px, *p01, i00, *p01);
else
wdec16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p2 = p;
p >>= 1;
}
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16; // literal (value) bit length
const int HUF_DECBITS = 14; // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;
struct HufDec {   // short code    long code
                  //----------------------------
  int len : 8;    // code length   0
  int lit : 24;   // lit           p size
  int *p;         // 0             lits
};
inline long long hufLength(long long code) { return code & 63; }
inline long long hufCode(long long code) { return code >> 6; }
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
char *&out) {
c <<= nBits;
lc += nBits;
c |= bits;
while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
while (lc < nBits) {
c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
lc += 8;
}
lc -= nBits;
return (c >> lc) & ((1 << nBits) - 1);
}
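// In both helpers, `c` is a bit accumulator whose low `lc` bits are valid;
// outputBits() flushes complete bytes MSB-first, and getBits() refills the
// accumulator one byte at a time before extracting the top `nBits` bits.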
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
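// Worked example: for three symbols with code lengths {s0: 2, s1: 2, s2: 1},
// the loop below assigns s0 = 00, s1 = 01, and s2 = 1. Note the canonical
// property: the shorter code, zero-padded to the right ("10"), is
// numerically larger than both 2-bit codes.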
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
long long n[59];
//
// For each i from 0 through 58, count the
// number of different codes of length i, and
// store the count in n[i].
//
for (int i = 0; i <= 58; ++i) n[i] = 0;
for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
//
// For each i from 58 through 1, compute the
// numerically lowest code with length i, and
// store that code in n[i].
//
long long c = 0;
for (int i = 58; i > 0; --i) {
long long nc = ((c + n[i]) >> 1);
n[i] = c;
c = nc;
}
//
// hcode[i] contains the length, l, of the
// code for symbol i. Assign the next available
// code of length l to the symbol and store both
// l and the code in hcode[i].
//
for (int i = 0; i < HUF_ENCSIZE; ++i) {
int l = static_cast<int>(hcode[i]);
if (l > 0) hcode[i] = l | (n[l]++ << 6);
}
}
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure: bits [63..6] hold the code, bits [5..0] its bit length;
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
struct FHeapCompare {
bool operator()(long long *a, long long *b) { return *a > *b; }
};
static void hufBuildEncTable(
long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
int *im, // o: min frq index
int *iM) // o: max frq index
{
//
// This function assumes that when it is called, array frq
// indicates the frequency of all possible symbols in the data
// that are to be Huffman-encoded. (frq[i] contains the number
// of occurrences of symbol i in the data.)
//
// The loop below does three things:
//
// 1) Finds the minimum and maximum indices that point
// to non-zero entries in frq:
//
// frq[im] != 0, and frq[i] == 0 for all i < im
// frq[iM] != 0, and frq[i] == 0 for all i > iM
//
// 2) Fills array fHeap with pointers to all non-zero
// entries in frq.
//
// 3) Initializes array hlink such that hlink[i] == i
// for all array entries.
//
int hlink[HUF_ENCSIZE];
long long *fHeap[HUF_ENCSIZE];
*im = 0;
while (!frq[*im]) (*im)++;
int nf = 0;
for (int i = *im; i < HUF_ENCSIZE; i++) {
hlink[i] = i;
if (frq[i]) {
fHeap[nf] = &frq[i];
nf++;
*iM = i;
}
}
//
// Add a pseudo-symbol, with a frequency count of 1, to frq;
// adjust the fHeap and hlink array accordingly. Function
// hufEncode() uses the pseudo-symbol for run-length encoding.
//
(*iM)++;
frq[*iM] = 1;
fHeap[nf] = &frq[*iM];
nf++;
//
// Build an array, scode, such that scode[i] contains the number
// of bits assigned to symbol i. Conceptually this is done by
// constructing a tree whose leaves are the symbols with non-zero
// frequency:
//
// Make a heap that contains all symbols with a non-zero frequency,
// with the least frequent symbol on top.
//
// Repeat until only one symbol is left on the heap:
//
// Take the two least frequent symbols off the top of the heap.
// Create a new node that has first two nodes as children, and
// whose frequency is the sum of the frequencies of the first
// two nodes. Put the new node back into the heap.
//
// The last node left on the heap is the root of the tree. For each
// leaf node, the distance between the root and the leaf is the length
// of the code for the corresponding symbol.
//
// The loop below doesn't actually build the tree; instead we compute
// the distances of the leaves from the root on the fly. When a new
// node is added to the heap, then that node's descendants are linked
// into a single linear list that starts at the new node, and the code
// lengths of the descendants (that is, their distance from the root
// of the tree) are incremented by one.
//
std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
long long scode[HUF_ENCSIZE];
memset(scode, 0, sizeof(long long) * HUF_ENCSIZE);
while (nf > 1) {
//
// Find the indices, mm and m, of the two smallest non-zero frq
// values in fHeap, add the smallest frq to the second-smallest
// frq, and remove the smallest frq value from fHeap.
//
int mm = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
--nf;
int m = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
frq[m] += frq[mm];
std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
//
// The entries in scode are linked into lists with the
// entries in hlink serving as "next" pointers and with
// the end of a list marked by hlink[j] == j.
//
// Traverse the lists that start at scode[m] and scode[mm].
// For each element visited, increment the length of the
// corresponding code by one bit. (If we visit scode[j]
// during the traversal, then the code for symbol j becomes
// one bit longer.)
//
// Merge the lists that start at scode[m] and scode[mm]
// into a single list that starts at scode[m].
//
//
// Add a bit to all codes in the first list.
//
for (int j = m;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) {
//
// Merge the two lists.
//
hlink[j] = mm;
break;
}
}
//
// Add a bit to all codes in the second list
//
for (int j = mm;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) break;
}
}
//
// Build a canonical Huffman code table, replacing the code
// lengths in scode with (code, code length) pairs. Copy the
// code table from scode into frq.
//
hufCanonicalCodeTable(scode);
memcpy(frq, scode, sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
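// Example: a run of 3 zero-length codes packs as the single 6-bit value
// SHORT_ZEROCODE_RUN + 3 - 2 = 60, while a run of 10 packs as
// LONG_ZEROCODE_RUN (63) followed by 10 - SHORTEST_LONG_RUN = 4 in 8 bits.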
static void hufPackEncTable(
const long long *hcode, // i : encoding table [HUF_ENCSIZE]
int im, // i : min hcode index
int iM, // i : max hcode index
char **pcode) // o: ptr to packed table (updated)
{
char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
int l = hufLength(hcode[im]);
if (l == 0) {
int zerun = 1;
while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
if (hufLength(hcode[im + 1]) > 0) break;
im++;
zerun++;
}
if (zerun >= 2) {
if (zerun >= SHORTEST_LONG_RUN) {
outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
} else {
outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
}
continue;
}
}
outputBits(6, l, c, lc, p);
}
if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));
*pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
static bool hufUnpackEncTable(
const char **pcode, // io: ptr to packed table (updated)
int ni, // i : input size (in bytes)
int im, // i : min hcode index
int iM, // i : max hcode index
long long *hcode) // o: encoding table [HUF_ENCSIZE]
{
memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
const char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
if (p - *pcode > ni) {
return false;
}
long long l = hcode[im] = getBits(6, c, lc, p); // code length
if (l == (long long)LONG_ZEROCODE_RUN) {
if (p - *pcode > ni) {
return false;
}
int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
} else if (l >= (long long)SHORT_ZEROCODE_RUN) {
int zerun = l - SHORT_ZEROCODE_RUN + 2;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
}
}
*pcode = const_cast<char *>(p);
hufCanonicalCodeTable(hcode);
return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
for (int i = 0; i < HUF_DECSIZE; i++) {
hdecod[i].len = 0;
hdecod[i].lit = 0;
hdecod[i].p = NULL;
}
// memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
//   infrequent;
// - decoding tables are used by hufDecode();
//
static bool hufBuildDecTable(const long long *hcode, // i : encoding table
int im, // i : min index in hcode
int iM, // i : max index in hcode
HufDec *hdecod) // o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
//
// Init hashtable & loop on all codes.
// Assumes that hufClearDecTable(hdecod) has already been called.
//
for (; im <= iM; im++) {
long long c = hufCode(hcode[im]);
int l = hufLength(hcode[im]);
if (c >> l) {
//
// Error: c is supposed to be an l-bit code,
// but c contains a value that is greater
// than the largest l-bit number.
//
// invalidTableEntry();
return false;
}
if (l > HUF_DECBITS) {
//
// Long code: add a secondary entry
//
HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
if (pl->len) {
//
// Error: a short code has already
// been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->lit++;
if (pl->p) {
int *p = pl->p;
pl->p = new int[pl->lit];
for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
delete[] p;
} else {
pl->p = new int[1];
}
pl->p[pl->lit - 1] = im;
} else if (l) {
//
// Short code: init all primary entries
//
HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
if (pl->len || pl->p) {
//
// Error: a short code or a long code has
// already been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->len = l;
pl->lit = im;
}
}
}
return true;
}
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
for (int i = 0; i < HUF_DECSIZE; i++) {
if (hdecod[i].p) {
delete[] hdecod[i].p;
hdecod[i].p = 0;
}
}
}
//
// ENCODING
//
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
long long &c, int &lc, char *&out) {
//
  // Output a run of runCount instances of the symbol sCode.
// Output the symbols explicitly, or if that is shorter, output
// the sCode symbol once followed by a runCode symbol and runCount
// expressed as an 8-bit number.
//
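  // Example: with 8-bit codes for both sCode and runCode, a run with
  // runCount = 99 costs 8 + 8 + 8 = 24 bits in run-length form versus about
  // 8 * 99 bits written out explicitly, so the run-length form is chosen.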
if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
outputCode(sCode, c, lc, out);
outputCode(runCode, c, lc, out);
outputBits(8, runCount, c, lc, out);
} else {
while (runCount-- >= 0) outputCode(sCode, c, lc, out);
}
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
static int hufEncode // return: output size (in bits)
(const long long *hcode, // i : encoding table
const unsigned short *in, // i : uncompressed input buffer
 const int ni,             // i : input buffer size (in unsigned shorts)
int rlc, // i : rl code
char *out) // o: compressed output buffer
{
char *outStart = out;
long long c = 0; // bits not yet written to out
int lc = 0; // number of valid bits in c (LSB)
int s = in[0];
int cs = 0;
//
// Loop on input values
//
for (int i = 1; i < ni; i++) {
//
// Count same values or send code
//
if (s == in[i] && cs < 255) {
cs++;
} else {
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
cs = 0;
}
s = in[i];
}
//
// Send remaining code
//
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
if (lc) *out = (c << (8 - lc)) & 0xff;
return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
#define getChar(c, lc, in) \
{ \
c = (c << 8) | *(unsigned char *)(in++); \
lc += 8; \
}
#define getCode(po, rlc, c, lc, in, out, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
static bool hufDecode(const long long *hcode, // i : encoding table
const HufDec *hdecod, // i : decoding table
const char *in, // i : compressed input buffer
int ni, // i : input size (in bits)
int rlc, // i : run-length code
                      int no,    // i : expected output size (in unsigned shorts)
unsigned short *out) // o: uncompressed output buffer
{
long long c = 0;
int lc = 0;
unsigned short *outb = out;
unsigned short *oe = out + no;
const char *ie = in + (ni + 7) / 8; // input byte size
//
// Loop on input bytes
//
while (in < ie) {
getChar(c, lc, in);
//
// Access decoding table
//
while (lc >= HUF_DECBITS) {
const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
if (pl.len) {
//
// Get short code
//
lc -= pl.len;
getCode(pl.lit, rlc, c, lc, in, out, oe);
} else {
if (!pl.p) {
return false;
}
// invalidCode(); // wrong code
//
// Search long code
//
int j;
for (j = 0; j < pl.lit; j++) {
int l = hufLength(hcode[pl.p[j]]);
while (lc < l && in < ie) // get more bits
getChar(c, lc, in);
if (lc >= l) {
if (hufCode(hcode[pl.p[j]]) ==
((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
//
// Found : get long code
//
lc -= l;
getCode(pl.p[j], rlc, c, lc, in, out, oe);
break;
}
}
}
if (j == pl.lit) {
return false;
// invalidCode(); // Not found
}
}
}
}
//
// Get remaining (short) codes
//
int i = (8 - ni) & 7;
c >>= i;
lc -= i;
while (lc > 0) {
const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
if (pl.len) {
lc -= pl.len;
getCode(pl.lit, rlc, c, lc, in, out, oe);
} else {
return false;
// invalidCode(); // wrong (long) code
}
}
  if (out - outb != no) {
    // notEnoughData ();
    return false;
  }
  return true;
}
static void countFrequencies(long long freq[HUF_ENCSIZE],
const unsigned short data[/*n*/], int n) {
for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;
for (int i = 0; i < n; ++i) ++freq[data[i]];
}
static void writeUInt(char buf[4], unsigned int i) {
unsigned char *b = (unsigned char *)buf;
b[0] = i;
b[1] = i >> 8;
b[2] = i >> 16;
b[3] = i >> 24;
}
static unsigned int readUInt(const char buf[4]) {
const unsigned char *b = (const unsigned char *)buf;
return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
}
//
// EXTERNAL INTERFACE
//
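// Compressed layout produced by hufCompress(): a 20-byte header holding
// im, iM, the packed-table length, the bit count, and a reserved word
// (each a 4-byte little-endian uint), followed by the packed code-length
// table, followed by the Huffman bit stream.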
static int hufCompress(const unsigned short raw[], int nRaw,
char compressed[]) {
if (nRaw == 0) return 0;
long long freq[HUF_ENCSIZE];
countFrequencies(freq, raw, nRaw);
int im = 0;
int iM = 0;
hufBuildEncTable(freq, &im, &iM);
char *tableStart = compressed + 20;
char *tableEnd = tableStart;
hufPackEncTable(freq, im, iM, &tableEnd);
int tableLength = tableEnd - tableStart;
char *dataStart = tableEnd;
int nBits = hufEncode(freq, raw, nRaw, iM, dataStart);
int data_length = (nBits + 7) / 8;
writeUInt(compressed, im);
writeUInt(compressed + 4, iM);
writeUInt(compressed + 8, tableLength);
writeUInt(compressed + 12, nBits);
writeUInt(compressed + 16, 0); // room for future extensions
return dataStart + data_length - compressed;
}
static bool hufUncompress(const char compressed[], int nCompressed,
unsigned short raw[], int nRaw) {
  if (nCompressed == 0) {
    // Nothing to decode; succeed only if no output was expected.
    return (nRaw == 0);
  }
int im = readUInt(compressed);
int iM = readUInt(compressed + 4);
// int tableLength = readUInt (compressed + 8);
int nBits = readUInt(compressed + 12);
if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
const char *ptr = compressed + 20;
//
// Fast decoder needs at least 2x64-bits of compressed data, and
// needs to be run-able on this platform. Otherwise, fall back
// to the original decoder
//
// if (FastHufDecoder::enabled() && nBits > 128)
//{
// FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
// fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
//}
// else
{
std::vector<long long> freq(HUF_ENCSIZE);
std::vector<HufDec> hdec(HUF_DECSIZE);
hufClearDecTable(&hdec.at(0));
    if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                           &freq.at(0))) {
      return false;
    }
    {
      if (nBits > 8 * (nCompressed - (ptr - compressed))) {
        return false;
      }
      if (!hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0))) {
        hufFreeDecTable(&hdec.at(0));
        return false;
      }
      if (!hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, nRaw, raw)) {
        hufFreeDecTable(&hdec.at(0));
        return false;
      }
}
// catch (...)
//{
// hufFreeDecTable (hdec);
// throw;
//}
hufFreeDecTable(&hdec.at(0));
}
return true;
}
//
// Functions to compress the range of values in the pixel data
//
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
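// The bitmap marks which of the 65536 possible unsigned short values occur
// in the pixel data; the forward LUT then renumbers the occurring values
// into the dense range [0, maxValue]. Compacting the range this way usually
// allows the cheaper 14-bit wavelet transform and improves compression; the
// reverse LUT undoes the mapping after decoding.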
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
unsigned char bitmap[BITMAP_SIZE],
unsigned short &minNonZero,
unsigned short &maxNonZero) {
for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;
for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));
bitmap[0] &= ~1; // zero is not explicitly stored in
// the bitmap; we assume that the
// data always contain zeroes
minNonZero = BITMAP_SIZE - 1;
maxNonZero = 0;
for (int i = 0; i < BITMAP_SIZE; ++i) {
if (bitmap[i]) {
if (minNonZero > i) minNonZero = i;
if (maxNonZero < i) maxNonZero = i;
}
}
}
static unsigned short forwardLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
lut[i] = k++;
else
lut[i] = 0;
}
return k - 1; // maximum value stored in lut[],
} // i.e. number of ones in bitmap minus 1
static unsigned short reverseLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
}
int n = k - 1;
while (k < USHORT_RANGE) lut[k++] = 0;
return n; // maximum k where lut[k] is non-zero,
} // i.e. number of ones in bitmap minus 1
static void applyLut(const unsigned short lut[USHORT_RANGE],
unsigned short data[/*nData*/], int nData) {
for (int i = 0; i < nData; ++i) data[i] = lut[data[i]];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
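// PIZ pipeline (mirroring OpenEXR's ImfPizCompressor): build the bitmap and
// forward LUT to compact the value range, wavelet-transform each channel in
// place with wav2Encode(), then Huffman-encode the whole buffer with
// hufCompress(). DecompressPiz() runs the same steps in reverse.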
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
const unsigned char *inPtr, size_t inSize,
const std::vector<ChannelInfo> &channelInfo,
int data_width, int num_lines) {
unsigned char bitmap[BITMAP_SIZE];
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
  // Assume `inSize` is a multiple of 2 or 4.
std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
std::vector<PIZChannelData> channelData(channelInfo.size());
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t c = 0; c < channelData.size(); c++) {
PIZChannelData &cd = channelData[c];
cd.start = tmpBufferEnd;
cd.end = cd.start;
cd.nx = data_width;
cd.ny = num_lines;
// cd.ys = c.channel().ySampling;
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
cd.size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += cd.nx * cd.ny * cd.size;
}
const unsigned char *ptr = inPtr;
for (int y = 0; y < num_lines; ++y) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(cd.end, ptr, n * sizeof(unsigned short));
ptr += n * sizeof(unsigned short);
cd.end += n;
}
}
bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap,
minNonZero, maxNonZero);
unsigned short lut[USHORT_RANGE];
unsigned short maxValue = forwardLutFromBitmap(bitmap, lut);
applyLut(lut, &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
//
// Store range compression info in _outBuffer
//
char *buf = reinterpret_cast<char *>(outPtr);
memcpy(buf, &minNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
memcpy(buf, &maxNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
if (minNonZero <= maxNonZero) {
memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
maxNonZero - minNonZero + 1);
buf += maxNonZero - minNonZero + 1;
}
//
// Apply wavelet encoding
//
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Apply Huffman encoding; append the result to _outBuffer
//
// length header(4byte), then huff data. Initialize length header with zero,
// then later fill it by `length`.
char *lengthPtr = buf;
int zero = 0;
memcpy(buf, &zero, sizeof(int));
buf += sizeof(int);
int length =
hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
memcpy(lengthPtr, &length, sizeof(int));
(*outSize) = static_cast<unsigned int>(
(reinterpret_cast<unsigned char *>(buf) - outPtr) +
static_cast<unsigned int>(length));
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if ((*outSize) >= inSize) {
(*outSize) = static_cast<unsigned int>(inSize);
memcpy(outPtr, inPtr, inSize);
}
return true;
}
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
size_t tmpBufSize, size_t inLen, int num_channels,
const EXRChannelInfo *channels, int data_width,
int num_lines) {
if (inLen == tmpBufSize) {
    // Data is not compressed (Issue 40).
memcpy(outPtr, inPtr, inLen);
return true;
}
unsigned char bitmap[BITMAP_SIZE];
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
memset(bitmap, 0, BITMAP_SIZE);
const unsigned char *ptr = inPtr;
minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
ptr += 4;
if (maxNonZero >= BITMAP_SIZE) {
return false;
}
if (minNonZero <= maxNonZero) {
memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
maxNonZero - minNonZero + 1);
ptr += maxNonZero - minNonZero + 1;
}
unsigned short lut[USHORT_RANGE];
memset(lut, 0, sizeof(unsigned short) * USHORT_RANGE);
unsigned short maxValue = reverseLutFromBitmap(bitmap, lut);
//
// Huffman decoding
//
int length;
length = *(reinterpret_cast<const int *>(ptr));
ptr += sizeof(int);
std::vector<unsigned short> tmpBuffer(tmpBufSize);
hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer.at(0),
static_cast<int>(tmpBufSize));
//
// Wavelet decoding
//
std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
const EXRChannelInfo &chan = channels[i];
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
channelData[i].start = tmpBufferEnd;
channelData[i].end = channelData[i].start;
channelData[i].nx = data_width;
channelData[i].ny = num_lines;
// channelData[i].ys = 1;
channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
}
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Expand the pixel data to their original range
//
applyLut(lut, &tmpBuffer.at(0), static_cast<int>(tmpBufSize));
for (int y = 0; y < num_lines; y++) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
outPtr += n * sizeof(unsigned short);
cd.end += n;
}
}
return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
struct ZFPCompressionParam {
double rate;
int precision;
double tolerance;
int type; // TINYEXR_ZFP_COMPRESSIONTYPE_*
ZFPCompressionParam() {
type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
rate = 2.0;
precision = 0;
tolerance = 0.0f;
}
};
bool FindZFPCompressionParam(ZFPCompressionParam *param,
const EXRAttribute *attributes,
int num_attributes) {
bool foundType = false;
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) &&
(attributes[i].size == 1)) {
param->type = static_cast<int>(attributes[i].value[0]);
foundType = true;
}
}
if (!foundType) {
return false;
}
if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
(attributes[i].size == 8)) {
param->rate = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
(attributes[i].size == 4)) {
        param->precision = *(reinterpret_cast<int *>(attributes[i].value));
return true;
}
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
(attributes[i].size == 8)) {
param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
} else {
assert(0);
}
return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
int num_channels, const unsigned char *src,
unsigned long src_size,
                          const ZFPCompressionParam &param) {
  size_t uncompressed_size = static_cast<size_t>(dst_width) * dst_num_lines *
                             num_channels * sizeof(float);
  if (uncompressed_size == src_size) {
    // Data is not compressed (Issue 40); copy through and return early.
    memcpy(dst, src, src_size);
    return true;
  }
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((dst_width & 3U) || (dst_num_lines & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, dst_width, dst_num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = dst_width * dst_num_lines;
for (int c = 0; c < num_channels; c++) {
// decompress 4x4 pixel block.
for (int y = 0; y < dst_num_lines; y += 4) {
for (int x = 0; x < dst_width; x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * dst_width + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
const float *inPtr, int width, int num_lines, int num_channels,
                 const ZFPCompressionParam &param) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((width % 4) == 0);
assert((num_lines % 4) == 0);
if ((width & 3U) || (num_lines & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, width, num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = width * num_lines;
for (int c = 0; c < num_channels; c++) {
// compress 4x4 pixel block.
for (int y = 0; y < num_lines; y += 4) {
for (int x = 0; x < width; x += 4) {
float fblock[16];
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * width + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
zfp_stream_flush(zfp);
(*outSize) = zfp_stream_compressed_size(zfp);
zfp_stream_close(zfp);
return true;
}
#endif
//
// -----------------------------------------------------------------
//
static void DecodePixelData(/* out */ unsigned char **out_images,
const int *requested_pixel_types,
const unsigned char *data_ptr, size_t data_len,
int compression_type, int line_order, int width,
int height, int x_stride, int y, int line_no,
int num_lines, size_t pixel_data_size,
size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ
#if TINYEXR_USE_PIZ
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(
static_cast<size_t>(width * num_lines) * pixel_data_size));
size_t tmpBufLen = outBuf.size();
bool ret = tinyexr::DecompressPiz(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
data_len, static_cast<int>(num_channels), channels, width, num_lines);
assert(ret);
(void)ret;
// For PIZ_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val = line_ptr[u];
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
              v * pixel_data_size * static_cast<size_t>(width) +
              channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
}
}
#else
assert(0 && "PIZ is enabled in this build");
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
tinyexr::DecompressZip(reinterpret_cast<unsigned char *>(&outBuf.at(0)),
&dstLen, data_ptr,
static_cast<unsigned long>(data_len));
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val = line_ptr[u];
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)),
dstLen, data_ptr,
static_cast<unsigned long>(data_len));
// For RLE_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val = line_ptr[u];
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
if (!FindZFPCompressionParam(&zfp_compression_param, attributes,
num_attributes)) {
assert(0);
return;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
num_lines, num_channels, data_ptr,
static_cast<unsigned long>(data_len),
zfp_compression_param);
// For ZFP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
}
}
#else
(void)attributes;
(void)num_attributes;
(void)num_channels;
assert(0);
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
for (size_t c = 0; c < num_channels; c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
const unsigned short *line_ptr =
reinterpret_cast<const unsigned short *>(
data_ptr +
c * static_cast<size_t>(width) * sizeof(unsigned short));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *outLine =
reinterpret_cast<unsigned short *>(out_images[c]);
if (line_order == 0) {
outLine += y * x_stride;
} else {
outLine += (height - 1 - y) * x_stride;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
outLine[u] = hf.u;
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += y * x_stride;
} else {
outLine += (height - 1 - y) * x_stride;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
tinyexr::FP32 f32 = half_to_float(hf);
outLine[u] = f32.f;
}
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
const float *line_ptr = reinterpret_cast<const float *>(
data_ptr + c * static_cast<size_t>(width) * sizeof(float));
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += y * x_stride;
} else {
outLine += (height - 1 - y) * x_stride;
}
for (int u = 0; u < width; u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
data_ptr + c * static_cast<size_t>(width) * sizeof(unsigned int));
unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]);
if (line_order == 0) {
outLine += y * x_stride;
} else {
outLine += (height - 1 - y) * x_stride;
}
for (int u = 0; u < width; u++) {
unsigned int val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
}
}
}
}
static void DecodeTiledPixelData(
unsigned char **out_images, int *width, int *height,
const int *requested_pixel_types, const unsigned char *data_ptr,
size_t data_len, int compression_type, int line_order, int data_width,
int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
int tile_size_y, size_t pixel_data_size, size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
assert(tile_offset_x * tile_size_x < data_width);
assert(tile_offset_y * tile_size_y < data_height);
// Compute actual image size in a tile.
if ((tile_offset_x + 1) * tile_size_x >= data_width) {
(*width) = data_width - (tile_offset_x * tile_size_x);
} else {
(*width) = tile_size_x;
}
if ((tile_offset_y + 1) * tile_size_y >= data_height) {
(*height) = data_height - (tile_offset_y * tile_size_y);
} else {
(*height) = tile_size_y;
}
// Image size = tile size.
DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
compression_type, line_order, (*width), tile_size_y,
/* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
(*height), pixel_data_size, num_attributes, attributes,
num_channels, channels, channel_offset_list);
}
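// Illustrative check of the edge-tile clamping above (sizes are hypothetical,
// not taken from any particular file): with data_width == 1000 and
// tile_size_x == 64, tiles 0..14 are 64 pixels wide and the last tile column
// (tile_offset_x == 15) is clamped to 1000 - 15 * 64 == 40 pixels.
#if 0
static_assert(1000 - 15 * 64 == 40, "last tile column is 40 px wide");
static_assert((15 + 1) * 64 >= 1000, "tile 15 triggers the clamped branch");
#endif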
static void ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
int *pixel_data_size, size_t *channel_offset,
int num_channels,
const EXRChannelInfo *channels) {
channel_offset_list->resize(static_cast<size_t>(num_channels));
(*pixel_data_size) = 0;
(*channel_offset) = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
(*channel_offset_list)[c] = (*channel_offset);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
(*pixel_data_size) += sizeof(unsigned short);
(*channel_offset) += sizeof(unsigned short);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
(*pixel_data_size) += sizeof(float);
(*channel_offset) += sizeof(float);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
(*pixel_data_size) += sizeof(unsigned int);
(*channel_offset) += sizeof(unsigned int);
} else {
assert(0);
}
}
}
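// Worked example (a minimal sketch; the three HALF channels below are an
// assumption, not data from a real file): the interleaved per-scanline
// offsets come out as 0, 2 and 4 bytes, with 6 bytes per pixel in total.
#if 0
EXRChannelInfo chans[3] = {};  // e.g. "B", "G", "R", all HALF
chans[0].pixel_type = TINYEXR_PIXELTYPE_HALF;
chans[1].pixel_type = TINYEXR_PIXELTYPE_HALF;
chans[2].pixel_type = TINYEXR_PIXELTYPE_HALF;
std::vector<size_t> offsets;
int pixel_data_size = 0;
size_t channel_offset = 0;
ComputeChannelLayout(&offsets, &pixel_data_size, &channel_offset, 3, chans);
// offsets == {0, 2, 4}, pixel_data_size == 6, channel_offset == 6
#endif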
static unsigned char **AllocateImage(int num_channels,
const EXRChannelInfo *channels,
const int *requested_pixel_types,
int data_width, int data_height) {
unsigned char **images =
reinterpret_cast<unsigned char **>(static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
size_t data_len =
static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
// Alloc internal image for half type.
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
images[c] =
reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
malloc(sizeof(unsigned short) * data_len)));
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
images[c] = reinterpret_cast<unsigned char *>(
static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
} else {
assert(0);
}
}
return images;
}
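// Ownership sketch (hypothetical caller; mirrors the malloc() calls above):
// every images[c] and the images array itself must eventually be free()d.
#if 0
unsigned char **imgs = AllocateImage(num_channels, channels,
                                     requested_pixel_types, w, h);
// ... decode into imgs ...
for (int c = 0; c < num_channels; c++) {
  free(imgs[c]);
}
free(imgs);
#endif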
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
const EXRVersion *version, std::string *err,
const unsigned char *buf, size_t size) {
const char *marker = reinterpret_cast<const char *>(&buf[0]);
if (empty_header) {
(*empty_header) = false;
}
if (version->multipart) {
if (size > 0 && marker[0] == '\0') {
// End of header list.
if (empty_header) {
(*empty_header) = true;
}
return TINYEXR_SUCCESS;
}
}
// According to the spec, the header of every OpenEXR file must contain at
// least the following attributes:
//
// channels chlist
// compression compression
// dataWindow box2i
// displayWindow box2i
// lineOrder lineOrder
// pixelAspectRatio float
// screenWindowCenter v2f
// screenWindowWidth float
bool has_channels = false;
bool has_compression = false;
bool has_data_window = false;
bool has_display_window = false;
bool has_line_order = false;
bool has_pixel_aspect_ratio = false;
bool has_screen_window_center = false;
bool has_screen_window_width = false;
info->data_window[0] = 0;
info->data_window[1] = 0;
info->data_window[2] = 0;
info->data_window[3] = 0;
info->line_order = 0; // @fixme
info->display_window[0] = 0;
info->display_window[1] = 0;
info->display_window[2] = 0;
info->display_window[3] = 0;
info->screen_window_center[0] = 0.0f;
info->screen_window_center[1] = 0.0f;
info->screen_window_width = -1.0f;
info->pixel_aspect_ratio = -1.0f;
info->tile_size_x = -1;
info->tile_size_y = -1;
info->tile_level_mode = -1;
info->tile_rounding_mode = -1;
info->attributes.clear();
// Read attributes
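  // Each attribute record in the stream is laid out as
  //   name  : zero-terminated string
  //   type  : zero-terminated string
  //   size  : int (little-endian)
  //   value : `size` bytes
  // and tinyexr::ReadAttribute() consumes exactly one such record per
  // iteration of the loop below.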
size_t orig_size = size;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (version->tiled && attr_name.compare("tiles") == 0) {
unsigned int x_size, y_size;
unsigned char tile_mode;
assert(data.size() == 9);
memcpy(&x_size, &data.at(0), sizeof(int));
memcpy(&y_size, &data.at(4), sizeof(int));
tile_mode = data[8];
tinyexr::swap4(&x_size);
tinyexr::swap4(&y_size);
info->tile_size_x = static_cast<int>(x_size);
info->tile_size_y = static_cast<int>(y_size);
// mode = levelMode + roundingMode * 16
info->tile_level_mode = tile_mode & 0x3;
info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
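      // e.g. tile_mode == 17 (0x11) decodes to levelMode == 1 and
      // roundingMode == 1 with the masks above.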
} else if (attr_name.compare("compression") == 0) {
bool ok = false;
if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
ok = true;
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
ok = true;
#else
if (err) {
(*err) = "PIZ compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
ok = true;
#else
if (err) {
(*err) = "ZFP compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (!ok) {
if (err) {
(*err) = "Unknown compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->compression_type = static_cast<int>(data[0]);
has_compression = true;
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
ReadChannelInfo(info->channels, data);
if (info->channels.size() < 1) {
if (err) {
(*err) = "# of channels is zero.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
has_channels = true;
} else if (attr_name.compare("dataWindow") == 0) {
memcpy(&info->data_window[0], &data.at(0), sizeof(int));
memcpy(&info->data_window[1], &data.at(4), sizeof(int));
memcpy(&info->data_window[2], &data.at(8), sizeof(int));
memcpy(&info->data_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3]));
has_data_window = true;
} else if (attr_name.compare("displayWindow") == 0) {
memcpy(&info->display_window[0], &data.at(0), sizeof(int));
memcpy(&info->display_window[1], &data.at(4), sizeof(int));
memcpy(&info->display_window[2], &data.at(8), sizeof(int));
memcpy(&info->display_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[1]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[2]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[3]));
has_display_window = true;
} else if (attr_name.compare("lineOrder") == 0) {
info->line_order = static_cast<int>(data[0]);
has_line_order = true;
} else if (attr_name.compare("pixelAspectRatio") == 0) {
memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio));
has_pixel_aspect_ratio = true;
} else if (attr_name.compare("screenWindowCenter") == 0) {
memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[1]));
has_screen_window_center = true;
} else if (attr_name.compare("screenWindowWidth") == 0) {
memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_width));
has_screen_window_width = true;
} else if (attr_name.compare("chunkCount") == 0) {
memcpy(&info->chunk_count, &data.at(0), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count));
} else {
      // Custom attribute (up to TINYEXR_MAX_ATTRIBUTES).
if (info->attributes.size() < TINYEXR_MAX_ATTRIBUTES) {
EXRAttribute attrib;
#ifdef _MSC_VER
strncpy_s(attrib.name, attr_name.c_str(), 255);
strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
strncpy(attrib.name, attr_name.c_str(), 255);
strncpy(attrib.type, attr_type.c_str(), 255);
#endif
attrib.name[255] = '\0';
attrib.type[255] = '\0';
attrib.size = static_cast<int>(data.size());
attrib.value = static_cast<unsigned char *>(malloc(data.size()));
memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
data.size());
info->attributes.push_back(attrib);
}
}
}
// Check if required attributes exist
{
std::stringstream ss_err;
if (!has_compression) {
ss_err << "\"compression\" attribute not found in the header."
<< std::endl;
}
if (!has_channels) {
ss_err << "\"channels\" attribute not found in the header." << std::endl;
}
if (!has_line_order) {
ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
}
if (!has_display_window) {
ss_err << "\"displayWindow\" attribute not found in the header."
<< std::endl;
}
if (!has_data_window) {
ss_err << "\"dataWindow\" attribute not found in the header."
<< std::endl;
}
if (!has_pixel_aspect_ratio) {
ss_err << "\"pixelAspectRatio\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_width) {
ss_err << "\"screenWindowWidth\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_center) {
ss_err << "\"screenWindowCenter\" attribute not found in the header."
<< std::endl;
}
if (!(ss_err.str().empty())) {
if (err) {
(*err) += ss_err.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
}
info->header_len = static_cast<unsigned int>(orig_size - size);
return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
exr_header->screen_window_center[0] = info.screen_window_center[0];
exr_header->screen_window_center[1] = info.screen_window_center[1];
exr_header->screen_window_width = info.screen_window_width;
exr_header->chunk_count = info.chunk_count;
exr_header->display_window[0] = info.display_window[0];
exr_header->display_window[1] = info.display_window[1];
exr_header->display_window[2] = info.display_window[2];
exr_header->display_window[3] = info.display_window[3];
exr_header->data_window[0] = info.data_window[0];
exr_header->data_window[1] = info.data_window[1];
exr_header->data_window[2] = info.data_window[2];
exr_header->data_window[3] = info.data_window[3];
exr_header->line_order = info.line_order;
exr_header->compression_type = info.compression_type;
exr_header->tile_size_x = info.tile_size_x;
exr_header->tile_size_y = info.tile_size_y;
exr_header->tile_level_mode = info.tile_level_mode;
exr_header->tile_rounding_mode = info.tile_rounding_mode;
exr_header->num_channels = static_cast<int>(info.channels.size());
exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
// manually add '\0' for safety.
exr_header->channels[c].name[255] = '\0';
exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
exr_header->channels[c].p_linear = info.channels[c].p_linear;
exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
}
exr_header->pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->pixel_types[c] = info.channels[c].pixel_type;
}
// Initially fill with values of `pixel_types`
exr_header->requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
}
  // ParseEXRHeader() caps the list at TINYEXR_MAX_ATTRIBUTES entries, so
  // equality is still valid here.
  assert(info.attributes.size() <= TINYEXR_MAX_ATTRIBUTES);
exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
for (size_t i = 0; i < info.attributes.size(); i++) {
memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256);
memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256);
exr_header->custom_attributes[i].size = info.attributes[i].size;
    // Just copy the pointer (no deep copy of the attribute value).
exr_header->custom_attributes[i].value = info.attributes[i].value;
}
exr_header->header_len = info.header_len;
}
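// Note: `requested_pixel_types` starts out identical to `pixel_types`;
// callers such as LoadEXR() below override entries (e.g. HALF -> FLOAT)
// after parsing the header and before decoding the image.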
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
const std::vector<tinyexr::tinyexr_uint64> &offsets,
const unsigned char *head) {
int num_channels = exr_header->num_channels;
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;
size_t num_blocks = offsets.size();
std::vector<size_t> channel_offset_list;
int pixel_data_size = 0;
size_t channel_offset = 0;
tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
&channel_offset, num_channels,
exr_header->channels);
bool invalid_data = false;
if (exr_header->tiled) {
size_t num_tiles = offsets.size(); // = # of blocks
exr_image->tiles = static_cast<EXRTile *>(
malloc(sizeof(EXRTile) * static_cast<size_t>(num_tiles)));
for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
// Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
// 16 byte: tile coordinates
// 4 byte : data size
// ~ : data(uncompressed or compressed)
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);
int tile_coordinates[4];
memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3]));
// @todo{ LoD }
assert(tile_coordinates[2] == 0);
assert(tile_coordinates[3] == 0);
int data_len;
memcpy(&data_len, data_ptr + 16,
sizeof(int)); // 16 = sizeof(tile_coordinates)
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
assert(data_len >= 4);
// Move to data addr: 20 = 16 + 4;
data_ptr += 20;
tinyexr::DecodeTiledPixelData(
exr_image->tiles[tile_idx].images,
&(exr_image->tiles[tile_idx].width),
&(exr_image->tiles[tile_idx].height),
exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order, data_width, data_height, tile_coordinates[0],
tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y,
static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels), exr_header->channels,
channel_offset_list);
exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
exr_image->num_tiles = static_cast<int>(num_tiles);
}
} else { // scanline format
exr_image->images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < static_cast<int>(num_blocks); y++) {
size_t y_idx = static_cast<size_t>(y);
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
int line_no;
memcpy(&line_no, data_ptr, sizeof(int));
int data_len;
memcpy(&data_len, data_ptr + 4, sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
int end_line_no = (std::min)(line_no + num_scanline_blocks,
(exr_header->data_window[3] + 1));
int num_lines = end_line_no - line_no;
assert(num_lines > 0);
// Move to data addr: 8 = 4 + 4;
data_ptr += 8;
// Adjust line_no with data_window.bmin.y
line_no -= exr_header->data_window[1];
if (line_no < 0) {
invalid_data = true;
} else {
tinyexr::DecodePixelData(
exr_image->images, exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order, data_width, data_height, data_width, y,
line_no, num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels), exr_header->channels,
channel_offset_list);
}
} // omp parallel
}
if (invalid_data) {
return TINYEXR_ERROR_INVALID_DATA;
}
// Overwrite `pixel_type` with `requested_pixel_type`.
{
for (int c = 0; c < exr_header->num_channels; c++) {
exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
}
}
{
exr_image->num_channels = num_channels;
exr_image->width = data_width;
exr_image->height = data_height;
}
return TINYEXR_SUCCESS;
}
static bool ReconstructLineOffsets(
std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
const unsigned char *head, const unsigned char *marker, const size_t size) {
assert(head < marker);
assert(offsets->size() == n);
for (size_t i = 0; i < n; i++) {
size_t offset = static_cast<size_t>(marker - head);
// Offset should not exceed whole EXR file/data size.
if (offset >= size) {
return false;
}
int y;
unsigned int data_len;
    memcpy(&y, marker, sizeof(int));
    memcpy(&data_len, marker + 4, sizeof(unsigned int));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
    // Validate after byte-swapping so the bound check also holds on
    // big-endian hosts.
    if (data_len >= size) {
      return false;
    }
(*offsets)[i] = offset;
marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len)
}
return true;
}
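// Each scanline chunk referenced by the offset table has the layout
//   [int y][unsigned int data_len][data_len bytes of pixel data]
// so ReconstructLineOffsets() above can recover a damaged table by hopping
// data_len + 8 bytes per block, and DecodeChunk() parses the same 8-byte
// header when decoding.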
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *head,
const unsigned char *marker, const size_t size,
const char **err) {
if (exr_image == NULL || exr_header == NULL || head == NULL ||
marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;
// Read offset tables.
size_t num_blocks;
if (exr_header->chunk_count > 0) {
// Use `chunkCount` attribute.
num_blocks = static_cast<size_t>(exr_header->chunk_count);
} else if (exr_header->tiled) {
// @todo { LoD }
size_t num_x_tiles = static_cast<size_t>(data_width) /
static_cast<size_t>(exr_header->tile_size_x);
if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) <
static_cast<size_t>(data_width)) {
num_x_tiles++;
}
size_t num_y_tiles = static_cast<size_t>(data_height) /
static_cast<size_t>(exr_header->tile_size_y);
if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) <
static_cast<size_t>(data_height)) {
num_y_tiles++;
}
num_blocks = num_x_tiles * num_y_tiles;
} else {
num_blocks = static_cast<size_t>(data_height) /
static_cast<size_t>(num_scanline_blocks);
if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
static_cast<size_t>(data_height)) {
num_blocks++;
}
}
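  // e.g. data_height == 100 with ZIP compression (16 scanlines per block)
  // yields num_blocks == 7: six full blocks plus one 4-line remainder.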
std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks);
for (size_t y = 0; y < num_blocks; y++) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
if (err) {
(*err) = "Invalid offset value.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offsets[y] = offset;
}
// If line offsets are invalid, we try to reconstruct it.
// See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
for (size_t y = 0; y < num_blocks; y++) {
    if (offsets[y] == 0) {  // offsets are unsigned; zero marks a missing entry
// TODO(syoyo) Report as warning?
// if (err) {
// stringstream ss;
// ss << "Incomplete lineOffsets." << std::endl;
// (*err) += ss.str();
//}
bool ret =
ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
if (ret) {
// OK
break;
} else {
if (err) {
(*err) = "Cannot reconstruct lineOffset table.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
return DecodeChunk(exr_image, exr_header, offsets, head);
}
} // namespace tinyexr
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
const char **err) {
if (out_rgba == NULL) {
if (err) {
(*err) = "Invalid argument.\n";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
InitEXRImage(&exr_image);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
if (err) {
(*err) = "Loading multipart or DeepImage is not supported yet.\n";
}
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
{
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
{
int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
  if ((idxA == 0) && (idxR == -1) && (idxG == -1) && (idxB == -1)) {
    // Alpha channel only. Broadcast the single "A" channel to all four RGBA
    // components.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
} else {
// Assume RGB(A)
if (idxR == -1) {
if (err) {
(*err) = "R channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
if (err) {
(*err) = "G channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
if (err) {
(*err) = "B channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
        (*out_rgba)[4 * i + 3] = 1.0f;
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
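// Usage sketch (the file name is a placeholder): HALF channels are widened
// to FLOAT by the loop above, so `rgba` always holds width * height RGBA
// floats on success.
#if 0
float *rgba = NULL;
int w = 0, h = 0;
const char *err = NULL;
int ret = LoadEXR(&rgba, &w, &h, "input.exr", &err);
if (ret == TINYEXR_SUCCESS) {
  // ... use rgba ...
  free(rgba);
}
#endif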
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_header == NULL) {
if (err) {
(*err) = "Invalid argument.\n";
}
// Invalid argument
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
if (err && !err_str.empty()) {
#ifdef _WIN32
(*err) = _strdup(err_str.c_str()); // May leak
#else
(*err) = strdup(err_str.c_str()); // May leak
#endif
}
}
ConvertHeader(exr_header, info);
  // Transfer the `tiled` flag from the version header.
exr_header->tiled = version->tiled;
return ret;
}
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err) {
if (out_rgba == NULL || memory == NULL) {
if (err) {
(*err) = "Invalid argument.\n";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
InitEXRImage(&exr_image);
ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
if (idxR == -1) {
if (err) {
(*err) = "R channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
if (err) {
(*err) = "G channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
if (err) {
(*err) = "B channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
    } else {
      (*out_rgba)[4 * i + 3] = 1.0f;
    }
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
(void)ret;
}
return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
err);
}
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *memory, const size_t size,
const char **err) {
if (exr_image == NULL || memory == NULL ||
(size < tinyexr::kEXRVersionSize)) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->header_len == 0) {
if (err) {
(*err) = "EXRHeader is not initialized.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
const unsigned char *head = memory;
const unsigned char *marker = reinterpret_cast<const unsigned char *>(
memory + exr_header->header_len +
8); // +8 for magic number + version header.
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
err);
}
size_t SaveEXRImageToMemory(const EXRImage *exr_image,
const EXRHeader *exr_header,
unsigned char **memory_out, const char **err) {
if (exr_image == NULL || memory_out == NULL ||
exr_header->compression_type < 0) {
if (err) {
(*err) = "Invalid argument.";
}
return 0; // @fixme
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
if (err) {
(*err) = "PIZ compression is not supported in this build.";
}
return 0;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
if (err) {
(*err) = "ZFP compression is not supported in this build.";
}
return 0;
}
#endif
#if TINYEXR_USE_ZFP
for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) {
if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) {
if (err) {
(*err) = "Pixel type must be FLOAT for ZFP compression.";
}
return 0;
}
}
#endif
std::vector<unsigned char> memory;
// Header
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
memory.insert(memory.end(), header, header + 4);
}
// Version, scanline.
{
char marker[] = {2, 0, 0, 0};
/* @todo
if (exr_header->tiled) {
marker[1] |= 0x2;
}
if (exr_header->long_name) {
marker[1] |= 0x4;
}
if (exr_header->non_image) {
marker[1] |= 0x8;
}
if (exr_header->multipart) {
marker[1] |= 0x10;
}
*/
memory.insert(memory.end(), marker, marker + 4);
}
int num_scanlines = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanlines = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanlines = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanlines = 16;
}
// Write attributes.
std::vector<tinyexr::ChannelInfo> channels;
{
std::vector<unsigned char> data;
for (int c = 0; c < exr_header->num_channels; c++) {
tinyexr::ChannelInfo info;
info.p_linear = 0;
info.pixel_type = exr_header->requested_pixel_types[c];
info.x_sampling = 1;
info.y_sampling = 1;
info.name = std::string(exr_header->channels[c].name);
channels.push_back(info);
}
tinyexr::WriteChannelInfo(data, channels);
tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
static_cast<int>(data.size()));
}
{
int comp = exr_header->compression_type;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp));
tinyexr::WriteAttributeToMemory(
&memory, "compression", "compression",
reinterpret_cast<const unsigned char *>(&comp), 1);
}
{
int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1};
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3]));
tinyexr::WriteAttributeToMemory(
&memory, "dataWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
tinyexr::WriteAttributeToMemory(
&memory, "displayWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
}
{
unsigned char line_order = 0; // @fixme { read line_order from EXRHeader }
tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
&line_order, 1);
}
{
float aspectRatio = 1.0f;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio));
tinyexr::WriteAttributeToMemory(
&memory, "pixelAspectRatio", "float",
reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float));
}
{
float center[2] = {0.0f, 0.0f};
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1]));
tinyexr::WriteAttributeToMemory(
&memory, "screenWindowCenter", "v2f",
reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float));
}
{
float w = static_cast<float>(exr_image->width);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
reinterpret_cast<const unsigned char *>(&w),
sizeof(float));
}
// Custom attributes
if (exr_header->num_custom_attributes > 0) {
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
tinyexr::WriteAttributeToMemory(
&memory, exr_header->custom_attributes[i].name,
exr_header->custom_attributes[i].type,
reinterpret_cast<const unsigned char *>(
exr_header->custom_attributes[i].value),
exr_header->custom_attributes[i].size);
}
}
{ // end of header
unsigned char e = 0;
memory.push_back(e);
}
int num_blocks = exr_image->height / num_scanlines;
if (num_blocks * num_scanlines < exr_image->height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks));
size_t headerSize = memory.size();
tinyexr::tinyexr_uint64 offset =
headerSize +
static_cast<size_t>(num_blocks) *
sizeof(
tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable)
std::vector<unsigned char> data;
std::vector<std::vector<unsigned char> > data_list(
static_cast<size_t>(num_blocks));
std::vector<size_t> channel_offset_list(
static_cast<size_t>(exr_header->num_channels));
int pixel_data_size = 0;
size_t channel_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
channel_offset_list[c] = channel_offset;
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
pixel_data_size += sizeof(unsigned short);
channel_offset += sizeof(unsigned short);
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
pixel_data_size += sizeof(float);
channel_offset += sizeof(float);
} else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
pixel_data_size += sizeof(unsigned int);
channel_offset += sizeof(unsigned int);
} else {
assert(0);
}
}
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
// Use ZFP compression parameter from custom attributes(if such a parameter
// exists)
{
bool ret = tinyexr::FindZFPCompressionParam(
&zfp_compression_param, exr_header->custom_attributes,
exr_header->num_custom_attributes);
if (!ret) {
// Use predefined compression parameter.
zfp_compression_param.type = 0;
zfp_compression_param.rate = 2;
}
}
#endif
// Use a signed int loop counter since some OpenMP compilers do not allow
// unsigned types in `parallel for` loops.
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_blocks; i++) {
size_t ii = static_cast<size_t>(i);
int start_y = num_scanlines * i;
int endY = (std::min)(num_scanlines * (i + 1), exr_image->height);
int h = endY - start_y;
std::vector<unsigned char> buf(
static_cast<size_t>(exr_image->width * h * pixel_data_size));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP16 h16;
h16.u = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP32 f32 = half_to_float(h16);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f));
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
line_ptr[x] = f32.f;
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
for (int x = 0; x < exr_image->width; x++) {
unsigned short val = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap2(&val);
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
line_ptr[x] = val;
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP32 f32;
f32.f = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP16 h16;
h16 = float_to_half_full(f32);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
line_ptr[x] = h16.u;
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
for (int x = 0; x < exr_image->width; x++) {
float val = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
line_ptr[x] = val;
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
for (int y = 0; y < h; y++) {
for (int x = 0; x < exr_image->width; x++) {
unsigned int val = reinterpret_cast<unsigned int **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(&val);
// Assume increasing Y
unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
line_ptr[x] = val;
}
}
}
}
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(buf.size());
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), buf.begin(),
buf.begin() + data_len);
} else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
static_cast<unsigned long>(buf.size())));
#else
std::vector<unsigned char> block(
compressBound(static_cast<uLong>(buf.size())));
#endif
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressZip(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// (buf.size() * 3) / 2 would be enough.
std::vector<unsigned char> block((buf.size() * 3) / 2);
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressRle(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
unsigned int bufLen =
1024 + static_cast<unsigned int>(
1.2 * static_cast<unsigned int>(
buf.size())); // @fixme { compute good bound. }
std::vector<unsigned char> block(bufLen);
unsigned int outSize = static_cast<unsigned int>(block.size());
CompressPiz(&block.at(0), &outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
buf.size(), channels, exr_image->width, h);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
std::vector<unsigned char> block;
unsigned int outSize;
tinyexr::CompressZfp(
&block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
exr_image->width, h, exr_header->num_channels, zfp_compression_param);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else {
assert(0);
}
} // omp parallel
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
data.insert(data.end(), data_list[i].begin(), data_list[i].end());
offsets[i] = offset;
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
offset += data_list[i].size();
}
{
memory.insert(
memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)),
reinterpret_cast<unsigned char *>(&offsets.at(0)) +
sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks));
}
{ memory.insert(memory.end(), data.begin(), data.end()); }
assert(memory.size() > 0);
(*memory_out) = static_cast<unsigned char *>(malloc(memory.size()));
memcpy((*memory_out), &memory.at(0), memory.size());
return memory.size(); // OK
}
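// The return value is the number of bytes written to `*memory_out` (0 means
// failure); the caller owns the returned buffer and must free() it, as
// SaveEXRImageToFile() below does.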
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL || filename == NULL ||
exr_header->compression_type < 0) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
if (err) {
(*err) = "PIZ compression is not supported in this build.";
}
    // Note: 0 is TINYEXR_SUCCESS, so report the unsupported format explicitly.
    return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
if (err) {
(*err) = "ZFP compression is not supported in this build.";
}
    return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
#endif
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "wb");
#else
FILE *fp = fopen(filename, "wb");
#endif
if (!fp) {
if (err) {
(*err) = "Cannot write a file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
unsigned char *mem = NULL;
size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
if ((mem_size > 0) && mem) {
fwrite(mem, 1, mem_size, fp);
}
free(mem);
fclose(fp);
return TINYEXR_SUCCESS;
}
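// Save sketch (a minimal single-channel example; the 16x16 size, channel
// name "R" and output path are assumptions for illustration):
#if 0
EXRHeader header;
EXRImage image;
InitEXRHeader(&header);
InitEXRImage(&image);
float data[16 * 16] = {0};  // 16x16 single-channel image
float *image_ptr[1] = {data};
image.images = reinterpret_cast<unsigned char **>(image_ptr);
image.width = 16;
image.height = 16;
image.num_channels = 1;
header.num_channels = 1;
header.channels =
    static_cast<EXRChannelInfo *>(malloc(sizeof(EXRChannelInfo)));
strncpy(header.channels[0].name, "R", 255);
header.pixel_types = static_cast<int *>(malloc(sizeof(int)));
header.requested_pixel_types = static_cast<int *>(malloc(sizeof(int)));
header.pixel_types[0] = TINYEXR_PIXELTYPE_FLOAT;
header.requested_pixel_types[0] = TINYEXR_PIXELTYPE_FLOAT;
header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
const char *err = NULL;
int ret = SaveEXRImageToFile(&image, &header, "out.exr", &err);
// free() the malloc'd header members when done.
#endif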
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
if (deep_image == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _MSC_VER
FILE *fp = NULL;
errno_t errcode = fopen_s(&fp, filename, "rb");
  if (errcode || (!fp)) {  // fopen_s returns 0 on success
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
FILE *fp = fopen(filename, "rb");
if (!fp) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#endif
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize == 0) {
fclose(fp);
if (err) {
(*err) = "File size is zero.";
}
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
(void)ret;
}
fclose(fp);
const char *head = &buf[0];
const char *marker = &buf[0];
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
if (err) {
(*err) = "Invalid magic number.";
}
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
// Version, scanline.
{
    // ver 2.0, scanline, deep bit on (0x800)
    // must be [2, 8, 0, 0]
if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
if (err) {
(*err) = "Unsupported version or scanline.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
marker += 4;
}
int dx = -1;
int dy = -1;
int dw = -1;
int dh = -1;
int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1;
int num_channels = -1;
std::vector<tinyexr::ChannelInfo> channels;
// Read attributes
size_t size = filesize - tinyexr::kEXRVersionSize;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
marker++;
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (attr_name.compare("compression") == 0) {
compression_type = data[0];
if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
if (err) {
(*err) = "Unsupported compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
}
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
tinyexr::ReadChannelInfo(channels, data);
num_channels = static_cast<int>(channels.size());
if (num_channels < 1) {
if (err) {
(*err) = "Invalid channels format.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
} else if (attr_name.compare("dataWindow") == 0) {
memcpy(&dx, &data.at(0), sizeof(int));
memcpy(&dy, &data.at(4), sizeof(int));
memcpy(&dw, &data.at(8), sizeof(int));
memcpy(&dh, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh));
} else if (attr_name.compare("displayWindow") == 0) {
int x;
int y;
int w;
int h;
memcpy(&x, &data.at(0), sizeof(int));
memcpy(&y, &data.at(4), sizeof(int));
memcpy(&w, &data.at(8), sizeof(int));
memcpy(&h, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&h));
}
}
assert(dx >= 0);
assert(dy >= 0);
assert(dw >= 0);
assert(dh >= 0);
assert(num_channels >= 1);
int data_width = dw - dx + 1;
int data_height = dh - dy + 1;
// Read offset tables.
int num_blocks = data_height / num_scanline_blocks;
if (num_blocks * num_scanline_blocks < data_height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
tinyexr::tinyexr_int64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
marker += sizeof(tinyexr::tinyexr_int64); // = 8
offsets[y] = offset;
}
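  // Each offset is an absolute byte position from the beginning of the file,
  // which is why scanline blocks are addressed as `head + offsets[y]` below.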
#if TINYEXR_USE_PIZ
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
// OK
} else {
if (err) {
(*err) = "Unsupported format.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
  deep_image->image = static_cast<float ***>(
      malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
  for (int c = 0; c < num_channels; c++) {
    // Per-scanline sample arrays are allocated later, per scanline block,
    // once the number of samples in each line is known.
    deep_image->image[c] = static_cast<float **>(
        malloc(sizeof(float *) * static_cast<size_t>(data_height)));
  }
deep_image->offset_table = static_cast<int **>(
malloc(sizeof(int *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
deep_image->offset_table[y] = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(data_width)));
}
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// int: y coordinate
// int64: packed size of pixel offset table
// int64: packed size of sample data
// int64: unpacked size of sample data
// compressed pixel offset table
// compressed sample data
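    // Byte offsets within the block, as used by the memcpy calls below:
    //   [ 0.. 3]  line_no                (int)
    //   [ 4..11]  packedOffsetTableSize  (int64)
    //   [12..19]  packedSampleDataSize   (int64)
    //   [20..27]  unpackedSampleDataSize (int64)
    //   [28..  ]  compressed pixel offset table, then compressed sample data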
int line_no;
tinyexr::tinyexr_int64 packedOffsetTableSize;
tinyexr::tinyexr_int64 packedSampleDataSize;
tinyexr::tinyexr_int64 unpackedSampleDataSize;
memcpy(&line_no, data_ptr, sizeof(int));
memcpy(&packedOffsetTableSize, data_ptr + 4,
sizeof(tinyexr::tinyexr_int64));
memcpy(&packedSampleDataSize, data_ptr + 12,
sizeof(tinyexr::tinyexr_int64));
memcpy(&unpackedSampleDataSize, data_ptr + 20,
sizeof(tinyexr::tinyexr_int64));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
// decode pixel offset table.
{
unsigned long dstLen =
static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen,
data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize));
assert(dstLen == pixelOffsetTable.size() * sizeof(int));
for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
deep_image->offset_table[y][i] = pixelOffsetTable[i];
}
}
std::vector<unsigned char> sample_data(
static_cast<size_t>(unpackedSampleDataSize));
// decode sample data.
{
unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
if (dstLen) {
tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
data_ptr + 28 + packedOffsetTableSize,
static_cast<unsigned long>(packedSampleDataSize));
assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
}
}
// decode sample
int sampleSize = -1;
std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
{
int channel_offset = 0;
for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
channel_offset_list[i] = channel_offset;
if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
channel_offset += 4;
} else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
channel_offset += 2;
} else if (channels[i].pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) { // float
channel_offset += 4;
} else {
assert(0);
}
}
sampleSize = channel_offset;
}
assert(sampleSize >= 2);
assert(static_cast<size_t>(
pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
sampleSize) == sample_data.size());
int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
//
// Alloc memory
//
//
// pixel data is stored as image[channels][pixel_samples]
//
{
tinyexr::tinyexr_uint64 data_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
deep_image->image[c][y] = static_cast<float *>(
malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
if (channels[c].pixel_type == 0) { // UINT
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
unsigned int ui = *reinterpret_cast<unsigned int *>(
&sample_data.at(size_t(data_offset) + x * sizeof(int)));
deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
}
data_offset +=
sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
} else if (channels[c].pixel_type == 1) { // half
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
tinyexr::FP16 f16;
f16.u = *reinterpret_cast<unsigned short *>(
&sample_data.at(size_t(data_offset) + x * sizeof(short)));
tinyexr::FP32 f32 = half_to_float(f16);
deep_image->image[c][y][x] = f32.f;
}
data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
} else { // float
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
float f = *reinterpret_cast<float *>(
&sample_data.at(size_t(data_offset) + x * sizeof(float)));
deep_image->image[c][y][x] = f;
}
data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
}
}
}
} // y
deep_image->width = data_width;
deep_image->height = data_height;
deep_image->channel_names = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
}
deep_image->num_channels = num_channels;
return TINYEXR_SUCCESS;
}
void InitEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return;
}
exr_image->width = 0;
exr_image->height = 0;
exr_image->num_channels = 0;
exr_image->images = NULL;
exr_image->tiles = NULL;
exr_image->num_tiles = 0;
}
void InitEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return;
}
memset(exr_header, 0, sizeof(EXRHeader));
}
int FreeEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->channels) {
free(exr_header->channels);
}
if (exr_header->pixel_types) {
free(exr_header->pixel_types);
}
if (exr_header->requested_pixel_types) {
free(exr_header->requested_pixel_types);
}
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
if (exr_header->custom_attributes[i].value) {
free(exr_header->custom_attributes[i].value);
}
}
return TINYEXR_SUCCESS;
}
int FreeEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->images && exr_image->images[i]) {
free(exr_image->images[i]);
}
}
if (exr_image->images) {
free(exr_image->images);
}
if (exr_image->tiles) {
for (int tid = 0; tid < exr_image->num_tiles; tid++) {
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
free(exr_image->tiles[tid].images[i]);
}
}
if (exr_image->tiles[tid].images) {
free(exr_image->tiles[tid].images);
}
}
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_header == NULL || exr_version == NULL || filename == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
if (err) {
(*err) = "fread error.";
}
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
err);
}
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
int *num_headers,
const EXRVersion *exr_version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
exr_version == NULL) {
// Invalid argument
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
std::vector<tinyexr::HeaderInfo> infos;
for (;;) {
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
bool empty_header = false;
int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
if (err) {
#ifdef _WIN32
(*err) = _strdup(err_str.c_str()); // may leak
#else
(*err) = strdup(err_str.c_str()); // may leak
#endif
}
return ret;
}
if (empty_header) {
marker += 1; // skip '\0'
break;
}
// `chunkCount` must exist in the header.
if (info.chunk_count == 0) {
if (err) {
(*err) = "`chunkCount' attribute is not found in the header.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
infos.push_back(info);
// move to next header.
    marker += info.header_len;
    marker_size -= info.header_len;
}
// allocate memory for EXRHeader and create array of EXRHeader pointers.
(*exr_headers) =
static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
for (size_t i = 0; i < infos.size(); i++) {
EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
ConvertHeader(exr_header, infos[i]);
    // transfer `tiled` from version.
exr_header->tiled = exr_version->tiled;
(*exr_headers)[i] = exr_header;
}
(*num_headers) = static_cast<int>(infos.size());
return TINYEXR_SUCCESS;
}
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
filename == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
if (err) {
(*err) = "fread error.";
}
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRMultipartHeaderFromMemory(
exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
size_t size) {
if (version == NULL || memory == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory;
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
version->tiled = false;
version->long_name = false;
version->non_image = false;
version->multipart = false;
// Parse version header.
{
// must be 2
if (marker[0] != 2) {
return TINYEXR_ERROR_INVALID_EXR_VERSION;
}
version->version = 2;
if (marker[1] & 0x2) { // 9th bit
version->tiled = true;
}
if (marker[1] & 0x4) { // 10th bit
version->long_name = true;
}
if (marker[1] & 0x8) { // 11th bit
version->non_image = true; // (deep image)
}
if (marker[1] & 0x10) { // 12th bit
version->multipart = true;
}
}
return TINYEXR_SUCCESS;
}
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
if (filename == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t file_size;
// Compute size
fseek(fp, 0, SEEK_END);
file_size = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
  if (file_size < tinyexr::kEXRVersionSize) {
    fclose(fp);
    return TINYEXR_ERROR_INVALID_FILE;
  }
unsigned char buf[tinyexr::kEXRVersionSize];
size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
fclose(fp);
if (ret != tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
// compute total header size.
size_t total_header_size = 0;
for (unsigned int i = 0; i < num_parts; i++) {
if (exr_headers[i]->header_len == 0) {
if (err) {
(*err) = "EXRHeader is not initialized.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
total_header_size += exr_headers[i]->header_len;
}
const char *marker = reinterpret_cast<const char *>(
memory + total_header_size + 4 +
4); // +8 for magic number and version header.
marker += 1; // Skip empty header.
  // NOTE 1:
  // In a multipart image, there is a 'part number' before each chunk:
  // 4 bytes : part number
  // 4+ bytes: chunk data
  //
  // NOTE 2:
  // The EXR spec says 'part number' is 'unsigned long', but it is actually an
  // 'unsigned int' (4 bytes) in the OpenEXR implementation...
  // http://www.openexr.com/openexrfilelayout.pdf
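  // Consequently, the loop below stores (chunk offset + 4) in the offset
  // table so that each entry points directly at the chunk payload, and the
  // decoding loop steps back 4 bytes whenever it validates the part number.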
// Load chunk offset table.
std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list;
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> offset_table(
static_cast<size_t>(exr_headers[i]->chunk_count));
for (size_t c = 0; c < offset_table.size(); c++) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, 8);
tinyexr::swap8(&offset);
if (offset >= size) {
if (err) {
(*err) = "Invalid offset size.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
offset_table[c] = offset + 4; // +4 to skip 'part number'
marker += 8;
}
chunk_offset_table_list.push_back(offset_table);
}
// Decode image.
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> &offset_table =
chunk_offset_table_list[i];
    // First check that the 'part number' is identical to 'i'.
for (size_t c = 0; c < offset_table.size(); c++) {
const unsigned char *part_number_addr =
memory + offset_table[c] - 4; // -4 to move to 'part number' field.
unsigned int part_no;
memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4
tinyexr::swap4(&part_no);
if (part_no != i) {
assert(0);
return TINYEXR_ERROR_INVALID_DATA;
}
}
int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table,
memory);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
}
return TINYEXR_SUCCESS;
}
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts, const char *filename,
const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    if (ret != filesize) {
      if (err) {
        (*err) = "fread error.";
      }
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }
return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
&buf.at(0), filesize, err);
}
int SaveEXR(const float *data, int width, int height, int components,
const int save_as_fp16, const char *outfilename) {
  if ((components != 1) && (components != 3) && (components != 4)) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
// Assume at least 16x16 pixels.
if (width < 16) return TINYEXR_ERROR_INVALID_ARGUMENT;
if (height < 16) return TINYEXR_ERROR_INVALID_ARGUMENT;
EXRHeader header;
InitEXRHeader(&header);
EXRImage image;
InitEXRImage(&image);
image.num_channels = components;
std::vector<float> images[4];
if (components == 1) {
images[0].resize(static_cast<size_t>(width * height));
memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
} else {
images[0].resize(static_cast<size_t>(width * height));
images[1].resize(static_cast<size_t>(width * height));
images[2].resize(static_cast<size_t>(width * height));
images[3].resize(static_cast<size_t>(width * height));
// Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
images[0][i] = data[static_cast<size_t>(components) * i + 0];
images[1][i] = data[static_cast<size_t>(components) * i + 1];
images[2][i] = data[static_cast<size_t>(components) * i + 2];
if (components == 4) {
images[3][i] = data[static_cast<size_t>(components) * i + 3];
}
}
}
float *image_ptr[4] = {0, 0, 0, 0};
if (components == 4) {
image_ptr[0] = &(images[3].at(0)); // A
image_ptr[1] = &(images[2].at(0)); // B
image_ptr[2] = &(images[1].at(0)); // G
image_ptr[3] = &(images[0].at(0)); // R
} else if (components == 3) {
image_ptr[0] = &(images[2].at(0)); // B
image_ptr[1] = &(images[1].at(0)); // G
image_ptr[2] = &(images[0].at(0)); // R
} else if (components == 1) {
image_ptr[0] = &(images[0].at(0)); // A
}
image.images = reinterpret_cast<unsigned char **>(image_ptr);
image.width = width;
image.height = height;
header.num_channels = components;
header.channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
  // Must be (A)BGR order, since most EXR viewers expect this channel order.
if (components == 4) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
strncpy_s(header.channels[1].name, "B", 255);
strncpy_s(header.channels[2].name, "G", 255);
strncpy_s(header.channels[3].name, "R", 255);
#else
strncpy(header.channels[0].name, "A", 255);
strncpy(header.channels[1].name, "B", 255);
strncpy(header.channels[2].name, "G", 255);
strncpy(header.channels[3].name, "R", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
header.channels[1].name[strlen("B")] = '\0';
header.channels[2].name[strlen("G")] = '\0';
header.channels[3].name[strlen("R")] = '\0';
} else if (components == 3) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "B", 255);
strncpy_s(header.channels[1].name, "G", 255);
strncpy_s(header.channels[2].name, "R", 255);
#else
strncpy(header.channels[0].name, "B", 255);
strncpy(header.channels[1].name, "G", 255);
strncpy(header.channels[2].name, "R", 255);
#endif
header.channels[0].name[strlen("B")] = '\0';
header.channels[1].name[strlen("G")] = '\0';
header.channels[2].name[strlen("R")] = '\0';
} else {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
#else
strncpy(header.channels[0].name, "A", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
}
header.pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
header.requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
for (int i = 0; i < header.num_channels; i++) {
header.pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image
if (save_as_fp16 > 0) {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format
} else {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e.
// no precision reduction)
}
}
  const char *err = NULL;
  int ret = SaveEXRImageToFile(&image, &header, outfilename, &err);
  // Free the header allocations on both the success and error paths so that
  // an early return does not leak.
  free(header.channels);
  free(header.pixel_types);
  free(header.requested_pixel_types);
  return ret;
}
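// A minimal usage sketch for SaveEXR (illustrative only; `rgba` is assumed
// to be a tightly packed width*height*4 float buffer supplied by the caller,
// and width/height must be at least 16 per the checks above):
//
//   static int WriteRGBA(const float *rgba, int width, int height) {
//     // 4 components, store as half (fp16), write to "out.exr"
//     return SaveEXR(rgba, width, height, 4, /* save_as_fp16 */ 1,
//                    "out.exr");
//   }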
#endif // TINYEXR_IMPLEMENTATION_DEIFNED
#endif // TINYEXR_IMPLEMENTATION
|
GB_binop__lor_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_01__lor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__lor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_uint64)
// A*D function (colscale): GB (_AxD__lor_uint64)
// D*A function (rowscale): GB (_DxB__lor_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_uint64)
// C=scalar+B GB (_bind1st__lor_uint64)
// C=scalar+B' GB (_bind1st_tran__lor_uint64)
// C=A+scalar GB (_bind2nd__lor_uint64)
// C=A'+scalar GB (_bind2nd_tran__lor_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) || (y != 0)) ;
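// For example, GB_BINOP (Cx [p], aij, bij, i, j) expands to
// Cx [p] = ((aij != 0) || (bij != 0)) ;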
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_UINT64 || GxB_NO_LOR_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lor_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lor_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__lor_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lor_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lor_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) || (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lor_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) || (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lor_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
SparseOperations_impl.h | // Copyright (c) 2004-2022 Tomáš Oberhuber et al.
//
// This file is part of TNL - Template Numerical Library (https://tnl-project.org/)
//
// SPDX-License-Identifier: MIT
// Implemented by: Jakub Klinkovský
#pragma once
#include <type_traits>
#include <stdexcept>
#include <algorithm>
#include <memory> // std::unique_ptr
#include <TNL/Pointers/DevicePointer.h>
#include <TNL/Algorithms/ParallelFor.h>
namespace TNL {
namespace Matrices {
#ifdef HAVE_CUDA
template< typename Vector, typename Matrix >
__global__
void
SparseMatrixSetRowLengthsVectorKernel( Vector* rowLengths,
const Matrix* matrix,
typename Matrix::IndexType rows,
typename Matrix::IndexType cols )
{
using IndexType = typename Matrix::IndexType;
IndexType rowIdx = blockIdx.x * blockDim.x + threadIdx.x;
const IndexType gridSize = blockDim.x * gridDim.x;
while( rowIdx < rows ) {
const auto row = matrix->getRow( rowIdx );
IndexType length = 0;
for( IndexType c_j = 0; c_j < row.getSize(); c_j++ )
if( row.getColumnIndex( c_j ) < cols )
length++;
else
break;
rowLengths[ rowIdx ] = length;
rowIdx += gridSize;
}
}
template< typename Matrix1, typename Matrix2 >
__global__
void
SparseMatrixCopyKernel( Matrix1* A,
const Matrix2* B,
const typename Matrix2::IndexType* rowLengths,
typename Matrix2::IndexType rows )
{
using IndexType = typename Matrix2::IndexType;
IndexType rowIdx = blockIdx.x * blockDim.x + threadIdx.x;
const IndexType gridSize = blockDim.x * gridDim.x;
while( rowIdx < rows ) {
const auto length = rowLengths[ rowIdx ];
const auto rowB = B->getRow( rowIdx );
auto rowA = A->getRow( rowIdx );
for( IndexType c = 0; c < length; c++ )
rowA.setElement( c, rowB.getColumnIndex( c ), rowB.getValue( c ) );
rowIdx += gridSize;
}
}
#endif
// copy on the same device
template< typename Matrix1, typename Matrix2 >
typename std::enable_if< std::is_same< typename Matrix1::DeviceType, typename Matrix2::DeviceType >::value >::type
copySparseMatrix_impl( Matrix1& A, const Matrix2& B )
{
static_assert( std::is_same< typename Matrix1::RealType, typename Matrix2::RealType >::value,
"The matrices must have the same RealType." );
static_assert( std::is_same< typename Matrix1::DeviceType, typename Matrix2::DeviceType >::value,
"The matrices must be allocated on the same device." );
static_assert( std::is_same< typename Matrix1::IndexType, typename Matrix2::IndexType >::value,
"The matrices must have the same IndexType." );
using RealType = typename Matrix1::RealType;
using DeviceType = typename Matrix1::DeviceType;
using IndexType = typename Matrix1::IndexType;
const IndexType rows = B.getRows();
const IndexType cols = B.getColumns();
A.setDimensions( rows, cols );
if( std::is_same< DeviceType, Devices::Host >::value ) {
// set row lengths
typename Matrix1::RowsCapacitiesType rowLengths;
rowLengths.setSize( rows );
#ifdef HAVE_OPENMP
#pragma omp parallel for if( Devices::Host::isOMPEnabled() )
#endif
for( IndexType i = 0; i < rows; i++ ) {
const auto row = B.getRow( i );
IndexType length = 0;
for( IndexType c_j = 0; c_j < row.getSize(); c_j++ )
if( row.getColumnIndex( c_j ) < cols )
length++;
else
break;
rowLengths[ i ] = length;
}
A.setRowCapacities( rowLengths );
#ifdef HAVE_OPENMP
#pragma omp parallel for if( Devices::Host::isOMPEnabled() )
#endif
for( IndexType i = 0; i < rows; i++ ) {
const auto length = rowLengths[ i ];
const auto rowB = B.getRow( i );
auto rowA = A.getRow( i );
for( IndexType c = 0; c < length; c++ )
rowA.setElement( c, rowB.getColumnIndex( c ), rowB.getValue( c ) );
}
}
if( std::is_same< DeviceType, Devices::Cuda >::value ) {
#ifdef HAVE_CUDA
dim3 blockSize( 256 );
dim3 gridSize;
const IndexType desGridSize = 32 * Cuda::DeviceInfo::getCudaMultiprocessors( Cuda::DeviceInfo::getActiveDevice() );
gridSize.x = min( desGridSize, Cuda::getNumberOfBlocks( rows, blockSize.x ) );
typename Matrix1::RowsCapacitiesType rowLengths;
rowLengths.setSize( rows );
Pointers::DevicePointer< Matrix1 > Apointer( A );
const Pointers::DevicePointer< const Matrix2 > Bpointer( B );
// set row lengths
Pointers::synchronizeSmartPointersOnDevice< Devices::Cuda >();
SparseMatrixSetRowLengthsVectorKernel<<< gridSize,
blockSize >>>( rowLengths.getData(), &Bpointer.template getData< TNL::Devices::Cuda >(), rows, cols );
TNL_CHECK_CUDA_DEVICE;
Apointer->setRowCapacities( rowLengths );
// copy rows
Pointers::synchronizeSmartPointersOnDevice< Devices::Cuda >();
SparseMatrixCopyKernel<<< gridSize,
blockSize >>>( &Apointer.template modifyData< TNL::Devices::Cuda >(),
&Bpointer.template getData< TNL::Devices::Cuda >(),
rowLengths.getData(),
rows );
TNL_CHECK_CUDA_DEVICE;
#else
throw Exceptions::CudaSupportMissing();
#endif
}
}
// cross-device copy (host -> gpu)
template< typename Matrix1, typename Matrix2 >
typename std::enable_if< ! std::is_same< typename Matrix1::DeviceType, typename Matrix2::DeviceType >::value
&& std::is_same< typename Matrix2::DeviceType, Devices::Host >::value >::type
copySparseMatrix_impl( Matrix1& A, const Matrix2& B )
{
using CudaMatrix2 = typename Matrix2::template Self< typename Matrix2::RealType, Devices::Cuda >;
CudaMatrix2 B_tmp;
B_tmp = B;
copySparseMatrix_impl( A, B_tmp );
}
// cross-device copy (gpu -> host)
template< typename Matrix1, typename Matrix2 >
typename std::enable_if< ! std::is_same< typename Matrix1::DeviceType, typename Matrix2::DeviceType >::value
&& std::is_same< typename Matrix2::DeviceType, Devices::Cuda >::value >::type
copySparseMatrix_impl( Matrix1& A, const Matrix2& B )
{
using CudaMatrix1 = typename Matrix1::template Self< typename Matrix1::RealType, Devices::Cuda >;
CudaMatrix1 A_tmp;
copySparseMatrix_impl( A_tmp, B );
A = A_tmp;
}
template< typename Matrix1, typename Matrix2 >
void
copySparseMatrix( Matrix1& A, const Matrix2& B )
{
copySparseMatrix_impl( A, B );
}
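// A minimal usage sketch (the concrete matrix type below is an illustrative
// assumption; any two TNL sparse matrices with matching RealType and
// IndexType work, including mixed host/CUDA devices):
//
//   TNL::Matrices::SparseMatrix< double, TNL::Devices::Host, int > A;
//   TNL::Matrices::SparseMatrix< double, TNL::Devices::Cuda, int > B;
//   // ... fill B ...
//   TNL::Matrices::copySparseMatrix( A, B );  // dispatches to the cross-device overload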
template< typename Matrix, typename AdjacencyMatrix >
void
copyAdjacencyStructure( const Matrix& A, AdjacencyMatrix& B, bool has_symmetric_pattern, bool ignore_diagonal )
{
static_assert( std::is_same< typename Matrix::DeviceType, Devices::Host >::value,
"The function is not implemented for CUDA matrices - it would require atomic insertions "
"of elements into the sparse format." );
static_assert( std::is_same< typename Matrix::DeviceType, typename AdjacencyMatrix::DeviceType >::value,
"The matrices must be allocated on the same device." );
static_assert( std::is_same< typename Matrix::IndexType, typename AdjacencyMatrix::IndexType >::value,
"The matrices must have the same IndexType." );
// static_assert( std::is_same< typename AdjacencyMatrix::RealType, bool >::value,
// "The RealType of the adjacency matrix must be bool." );
using IndexType = typename Matrix::IndexType;
if( A.getRows() != A.getColumns() ) {
throw std::logic_error( "The matrix is not square: " + std::to_string( A.getRows() ) + " rows, "
+ std::to_string( A.getColumns() ) + " columns." );
}
const IndexType N = A.getRows();
B.setDimensions( N, N );
// set row lengths
typename AdjacencyMatrix::RowsCapacitiesType rowLengths;
rowLengths.setSize( N );
rowLengths.setValue( 0 );
for( IndexType i = 0; i < A.getRows(); i++ ) {
const auto row = A.getRow( i );
IndexType length = 0;
      for( IndexType c_j = 0; c_j < row.getSize(); c_j++ ) {
const IndexType j = row.getColumnIndex( c_j );
if( j >= A.getColumns() )
break;
length++;
if( ! has_symmetric_pattern && i != j )
if( A.getElement( j, i ) == 0 )
rowLengths[ j ]++;
}
if( ignore_diagonal )
length--;
rowLengths[ i ] += length;
}
B.setRowCapacities( rowLengths );
// set non-zeros
for( IndexType i = 0; i < A.getRows(); i++ ) {
const auto row = A.getRow( i );
      for( IndexType c_j = 0; c_j < row.getSize(); c_j++ ) {
const IndexType j = row.getColumnIndex( c_j );
if( j >= A.getColumns() )
break;
if( ! ignore_diagonal || i != j )
if( A.getElement( i, j ) != 0 ) {
B.setElement( i, j, true );
if( ! has_symmetric_pattern )
B.setElement( j, i, true );
}
}
}
}
template< typename Matrix1, typename Matrix2, typename PermutationArray >
void
reorderSparseMatrix( const Matrix1& matrix1, Matrix2& matrix2, const PermutationArray& perm, const PermutationArray& iperm )
{
// TODO: implement on GPU
static_assert( std::is_same< typename Matrix1::DeviceType, Devices::Host >::value,
"matrix reordering is implemented only for host" );
static_assert( std::is_same< typename Matrix2::DeviceType, Devices::Host >::value,
"matrix reordering is implemented only for host" );
static_assert( std::is_same< typename PermutationArray::DeviceType, Devices::Host >::value,
"matrix reordering is implemented only for host" );
using IndexType = typename Matrix1::IndexType;
matrix2.setDimensions( matrix1.getRows(), matrix1.getColumns() );
// set row lengths
typename Matrix2::RowsCapacitiesType rowLengths;
rowLengths.setSize( matrix1.getRows() );
for( IndexType i = 0; i < matrix1.getRows(); i++ ) {
const auto row = matrix1.getRow( perm[ i ] );
IndexType length = 0;
for( IndexType j = 0; j < row.getSize(); j++ )
if( row.getColumnIndex( j ) < matrix1.getColumns() )
length++;
rowLengths[ i ] = length;
}
matrix2.setRowCapacities( rowLengths );
// set row elements
for( IndexType i = 0; i < matrix2.getRows(); i++ ) {
const IndexType rowLength = rowLengths[ i ];
// extract sparse row
const auto row1 = matrix1.getRow( perm[ i ] );
// permute
std::unique_ptr< typename Matrix2::IndexType[] > columns{ new typename Matrix2::IndexType[ rowLength ] };
std::unique_ptr< typename Matrix2::RealType[] > values{ new typename Matrix2::RealType[ rowLength ] };
for( IndexType j = 0; j < rowLength; j++ ) {
columns[ j ] = iperm[ row1.getColumnIndex( j ) ];
values[ j ] = row1.getValue( j );
}
// sort
std::unique_ptr< IndexType[] > indices{ new IndexType[ rowLength ] };
for( IndexType j = 0; j < rowLength; j++ )
indices[ j ] = j;
auto comparator = [ &columns ]( IndexType a, IndexType b )
{
return columns[ a ] < columns[ b ];
};
std::sort( indices.get(), indices.get() + rowLength, comparator );
// set the row
auto row2 = matrix2.getRow( i );
for( IndexType j = 0; j < rowLength; j++ )
row2.setElement( j, columns[ indices[ j ] ], values[ indices[ j ] ] );
}
}
template< typename Array1, typename Array2, typename PermutationArray >
void
reorderArray( const Array1& src, Array2& dest, const PermutationArray& perm )
{
static_assert( std::is_same< typename Array1::DeviceType, typename Array2::DeviceType >::value,
"Arrays must reside on the same device." );
static_assert( std::is_same< typename Array1::DeviceType, typename PermutationArray::DeviceType >::value,
"Arrays must reside on the same device." );
TNL_ASSERT_EQ( src.getSize(), perm.getSize(), "Source array and permutation must have the same size." );
TNL_ASSERT_EQ( dest.getSize(), perm.getSize(), "Destination array and permutation must have the same size." );
using DeviceType = typename Array1::DeviceType;
using IndexType = typename Array1::IndexType;
auto kernel = [] __cuda_callable__( IndexType i,
const typename Array1::ValueType* src,
typename Array2::ValueType* dest,
const typename PermutationArray::ValueType* perm )
{
dest[ i ] = src[ perm[ i ] ];
};
Algorithms::ParallelFor< DeviceType >::exec(
(IndexType) 0, src.getSize(), kernel, src.getData(), dest.getData(), perm.getData() );
}
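// Example (a sketch, assuming host arrays): with src = { 10, 20, 30 } and
// perm = { 2, 0, 1 }, the kernel computes dest[ i ] = src[ perm[ i ] ],
// yielding dest = { 30, 10, 20 }.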
} // namespace Matrices
} // namespace TNL
|
declare_reduction_ast_print.c | // RUN: %clang_cc1 -verify -fopenmp -ast-print %s | FileCheck %s
// RUN: %clang_cc1 -fopenmp -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -include-pch %t -fsyntax-only -verify %s -ast-print | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp-simd -ast-print %s | FileCheck %s
// RUN: %clang_cc1 -fopenmp-simd -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -include-pch %t -fsyntax-only -verify %s -ast-print | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
#pragma omp declare reduction(+ : int, char : omp_out *= omp_in)
// CHECK: #pragma omp declare reduction (+ : int : omp_out *= omp_in){{$}}
// CHECK-NEXT: #pragma omp declare reduction (+ : char : omp_out *= omp_in)
#pragma omp declare reduction(fun : float : omp_out += omp_in) initializer(omp_priv = omp_orig + 15)
// CHECK: #pragma omp declare reduction (fun : float : omp_out += omp_in) initializer(omp_priv = omp_orig + 15)
// CHECK: struct SSS {
struct SSS {
int field;
#pragma omp declare reduction(+ : int, char : omp_out *= omp_in)
// CHECK: #pragma omp declare reduction (+ : int : omp_out *= omp_in)
// CHECK-NEXT: #pragma omp declare reduction (+ : char : omp_out *= omp_in)
};
// CHECK: };
void init(struct SSS *priv, struct SSS orig);
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: #pragma omp declare reduction (fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: int main(void) {
int main(void) {
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: #pragma omp declare reduction (fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
{
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: #pragma omp declare reduction (fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
}
return 0;
}
// CHECK: }
#pragma omp declare reduction(mymin:int \
: omp_out = omp_out > omp_in ? omp_in : omp_out) \
initializer(omp_priv = 2147483647)
#pragma omp declare reduction(mymin \
: struct SSS \
: omp_out = omp_out.field > omp_in.field ? omp_in : omp_out)
int foo(int argc, char **argv) {
int x;
struct SSS ss;
#pragma omp parallel for reduction(mymin : x, ss)
for (int i = 0; i < 1000; i++)
;
return 0;
}
// CHECK: #pragma omp parallel for reduction(mymin: x,ss)
#endif
|
BFSFriends.h | #ifndef _BFS_FRIENDS_H_
#define _BFS_FRIENDS_H_
#include "mpi.h"
#include <iostream>
#include "SpParMat.h"
#include "SpParHelper.h"
#include "MPIType.h"
#include "Friends.h"
#include "OptBuf.h"
#include "ParFriends.h"
#include "SpImplNoSR.h"
#include "BitMap.h"
#include "BitMapCarousel.h"
#include "BitMapFringe.h"
using namespace std;
template <class IT, class NT, class DER>
class SpParMat;
/*************************************************************************************************/
/*********************** FRIEND FUNCTIONS FOR BFS ONLY (NO SEMIRINGS) RUNS **********************/
/***************************** BOTH PARALLEL AND SEQUENTIAL FUNCTIONS ****************************/
/*************************************************************************************************/
/**
 * Multithreaded SpMV with a sparse vector and preset buffers.
 * The assembly of the outgoing buffers sendindbuf/sendnumbuf is done here.
 */
template <typename IT, typename VT>
void dcsc_gespmv_threaded_setbuffers (const SpDCCols<IT, bool> & A, const int32_t * indx, const VT * numx, int32_t nnzx,
int32_t * sendindbuf, VT * sendnumbuf, int * cnts, int * dspls, int p_c)
{
if(A.getnnz() > 0 && nnzx > 0)
{
int splits = A.getnsplit();
if(splits > 0)
{
vector< vector<int32_t> > indy(splits);
vector< vector< VT > > numy(splits);
int32_t nlocrows = static_cast<int32_t>(A.getnrow());
int32_t perpiece = nlocrows / splits;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(int i=0; i<splits; ++i)
{
if(i != splits-1)
SpMXSpV_ForThreading(*(A.GetDCSC(i)), perpiece, indx, numx, nnzx, indy[i], numy[i], i*perpiece);
else
SpMXSpV_ForThreading(*(A.GetDCSC(i)), nlocrows - perpiece*i, indx, numx, nnzx, indy[i], numy[i], i*perpiece);
}
int32_t perproc = nlocrows / p_c;
int32_t last_rec = p_c-1;
// keep recipients of last entries in each split (-1 for an empty split)
// so that we can delete indy[] and numy[] contents as soon as they are processed
vector<int32_t> end_recs(splits);
for(int i=0; i<splits; ++i)
{
if(indy[i].empty())
end_recs[i] = -1;
else
end_recs[i] = min(indy[i].back() / perproc, last_rec);
}
int ** loc_rec_cnts = new int *[splits];
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(int i=0; i<splits; ++i)
{
loc_rec_cnts[i] = new int[p_c](); // thread-local recipient data
if(!indy[i].empty()) // guarantee that .begin() and .end() are not null
{
int32_t cur_rec = min( indy[i].front() / perproc, last_rec);
int32_t lastdata = (cur_rec+1) * perproc; // one past last entry that goes to this current recipient
for(typename vector<int32_t>::iterator it = indy[i].begin(); it != indy[i].end(); ++it)
{
if( ( (*it) >= lastdata ) && cur_rec != last_rec)
{
cur_rec = min( (*it) / perproc, last_rec);
lastdata = (cur_rec+1) * perproc;
}
++loc_rec_cnts[i][cur_rec];
}
}
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(int i=0; i<splits; ++i)
{
if(!indy[i].empty()) // guarantee that .begin() and .end() are not null
{
				// FACT: Data is sorted, so if the recipient of begin is the same as
				// the recipient of end, then all of the data goes to that one processor
int32_t beg_rec = min( indy[i].front() / perproc, last_rec);
int32_t alreadysent = 0; // already sent per recipient
for(int before = i-1; before >= 0; before--)
alreadysent += loc_rec_cnts[before][beg_rec];
if(beg_rec == end_recs[i]) // fast case
{
					transform(indy[i].begin(), indy[i].end(), indy[i].begin(),
					          [perproc, beg_rec](int32_t v) { return v - perproc * beg_rec; });
copy(indy[i].begin(), indy[i].end(), sendindbuf + dspls[beg_rec] + alreadysent);
copy(numy[i].begin(), numy[i].end(), sendnumbuf + dspls[beg_rec] + alreadysent);
}
else // slow case
{
int32_t cur_rec = beg_rec;
int32_t lastdata = (cur_rec+1) * perproc; // one past last entry that goes to this current recipient
for(typename vector<int32_t>::iterator it = indy[i].begin(); it != indy[i].end(); ++it)
{
if( ( (*it) >= lastdata ) && cur_rec != last_rec )
{
cur_rec = min( (*it) / perproc, last_rec);
lastdata = (cur_rec+1) * perproc;
// if this split switches to a new recipient after sending some data
// then it's sure that no data has been sent to that recipient yet
alreadysent = 0;
}
sendindbuf[ dspls[cur_rec] + alreadysent ] = (*it) - perproc*cur_rec; // convert to receiver's local index
sendnumbuf[ dspls[cur_rec] + (alreadysent++) ] = *(numy[i].begin() + (it-indy[i].begin()));
}
}
}
}
		// Deallocate the per-thread recipient counts serially once all threads complete
for(int i=0; i< splits; ++i)
{
for(int j=0; j< p_c; ++j)
cnts[j] += loc_rec_cnts[i][j];
delete [] loc_rec_cnts[i];
}
delete [] loc_rec_cnts;
}
else
{
cout << "Something is wrong, splits should be nonzero for multithreaded execution" << endl;
}
}
}
/**
* Step 3 of the sparse SpMV algorithm, without the semiring (BFS only)
* @param[in,out] optbuf {scratch space for all-to-all (fold) communication}
* @param[in,out] indacc, numacc {index and values of the input vector, deleted upon exit}
* @param[in,out] sendindbuf, sendnumbuf {index and values of the output vector, created}
**/
template<typename VT, typename IT, typename UDER>
void LocalSpMV(const SpParMat<IT,bool,UDER> & A, int rowneighs, OptBuf<int32_t, VT > & optbuf, int32_t * & indacc, VT * & numacc, int * sendcnt, int accnz)
{
#ifdef TIMING
double t0=MPI_Wtime();
#endif
if(optbuf.totmax > 0) // graph500 optimization enabled
{
if(A.spSeq->getnsplit() > 0)
{
// optbuf.{inds/nums/dspls} and sendcnt are all pre-allocated and only filled by dcsc_gespmv_threaded
dcsc_gespmv_threaded_setbuffers (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs);
}
else
{
// by-pass dcsc_gespmv call
if(A.getlocalnnz() > 0 && accnz > 0)
{
SpMXSpV(*((A.spSeq)->GetDCSC()), (int32_t) A.getlocalrows(), indacc, numacc,
accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs, optbuf.isthere);
}
}
DeleteAll(indacc,numacc);
}
else
{
		SpParHelper::Print("BFS-only (no semiring) functions only work with optimization buffers\n");
}
#ifdef TIMING
double t1=MPI_Wtime();
cblas_localspmvtime += (t1-t0);
#endif
}
template <typename IU, typename VT>
void MergeContributions(FullyDistSpVec<IU,VT> & y, int * & recvcnt, int * & rdispls, int32_t * & recvindbuf, VT * & recvnumbuf, int rowneighs)
{
#ifdef TIMING
double t0=MPI_Wtime();
#endif
// free memory of y, in case it was aliased
vector<IU>().swap(y.ind);
vector<VT>().swap(y.num);
#ifndef HEAPMERGE
IU ysize = y.MyLocLength(); // my local length is only O(n/p)
bool * isthere = new bool[ysize];
vector< pair<IU,VT> > ts_pairs;
fill_n(isthere, ysize, false);
// We don't need to keep a "merger" because minimum will always come from the processor
// with the smallest rank; so a linear sweep over the received buffer is enough
for(int i=0; i<rowneighs; ++i)
{
for(int j=0; j< recvcnt[i]; ++j)
{
int32_t index = recvindbuf[rdispls[i] + j];
if(!isthere[index])
ts_pairs.push_back(make_pair(index, recvnumbuf[rdispls[i] + j]));
}
}
DeleteAll(recvcnt, rdispls);
DeleteAll(isthere, recvindbuf, recvnumbuf);
__gnu_parallel::sort(ts_pairs.begin(), ts_pairs.end());
int nnzy = ts_pairs.size();
y.ind.resize(nnzy);
y.num.resize(nnzy);
for(int i=0; i< nnzy; ++i)
{
y.ind[i] = ts_pairs[i].first;
y.num[i] = ts_pairs[i].second;
}
#else
// Alternative 2: Heap-merge
int32_t hsize = 0;
int32_t inf = numeric_limits<int32_t>::min();
int32_t sup = numeric_limits<int32_t>::max();
KNHeap< int32_t, int32_t > sHeap(sup, inf);
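	// KNHeap requires sentinel keys bounding every real key from above (sup)
	// and below (inf); entries are (key = received index, value = proc id).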
int * processed = new int[rowneighs]();
for(int32_t i=0; i<rowneighs; ++i)
{
if(recvcnt[i] > 0)
{
// key, proc_id
sHeap.insert(recvindbuf[rdispls[i]], i);
++hsize;
}
}
int32_t key, locv;
if(hsize > 0)
{
sHeap.deleteMin(&key, &locv);
y.ind.push_back( static_cast<IU>(key));
y.num.push_back(recvnumbuf[rdispls[locv]]); // nothing is processed yet
if( (++(processed[locv])) < recvcnt[locv] )
sHeap.insert(recvindbuf[rdispls[locv]+processed[locv]], locv);
else
--hsize;
}
// ofstream oput;
// y.commGrid->OpenDebugFile("Merge", oput);
// oput << "From displacements: "; copy(rdispls, rdispls+rowneighs, ostream_iterator<int>(oput, " ")); oput << endl;
// oput << "From counts: "; copy(recvcnt, recvcnt+rowneighs, ostream_iterator<int>(oput, " ")); oput << endl;
while(hsize > 0)
{
sHeap.deleteMin(&key, &locv);
IU deref = rdispls[locv] + processed[locv];
if(y.ind.back() != static_cast<IU>(key)) // y.ind is surely not empty
{
y.ind.push_back(static_cast<IU>(key));
y.num.push_back(recvnumbuf[deref]);
}
if( (++(processed[locv])) < recvcnt[locv] )
sHeap.insert(recvindbuf[rdispls[locv]+processed[locv]], locv);
else
--hsize;
}
DeleteAll(recvcnt, rdispls,processed);
DeleteAll(recvindbuf, recvnumbuf);
#endif
#ifdef TIMING
double t1=MPI_Wtime();
cblas_mergeconttime += (t1-t0);
#endif
}
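// The #else branch above is a k-way merge of rowneighs sorted runs. A minimal
// sketch of the same idea using std::priority_queue (illustration only; the
// production code uses KNHeap, and ties go to the run with the smallest rank):
//
//   #include <cstdint>
//   #include <functional>
//   #include <queue>
//   #include <utility>
//   #include <vector>
//
//   std::vector<std::pair<int32_t,double>>
//   KWayMerge(const std::vector<std::vector<std::pair<int32_t,double>>> & runs)
//   {
//       using Entry = std::pair<int32_t,int>;   // (key, run id), min-heap order
//       std::priority_queue<Entry, std::vector<Entry>, std::greater<Entry>> heap;
//       std::vector<size_t> pos(runs.size(), 0);
//       for (int i = 0; i < (int) runs.size(); ++i)
//           if (!runs[i].empty()) heap.push({runs[i][0].first, i});
//       std::vector<std::pair<int32_t,double>> out;
//       while (!heap.empty()) {
//           auto [key, r] = heap.top(); heap.pop();
//           if (out.empty() || out.back().first != key)  // drop duplicate indices
//               out.push_back(runs[r][pos[r]]);
//           if (++pos[r] < runs[r].size())               // refill from run r
//               heap.push({runs[r][pos[r]].first, r});
//       }
//       return out;
//   }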
/**
 * This is essentially an SpMV for BFS because it lacks the semiring.
 * It naturally just selects columns of A (the adjacencies of the frontier) and
 * merges them, with the minimum entry winning on duplicates. SpParMat has to be
 * boolean; input and output vectors are of type VT but their indices are IT
*/
template <typename VT, typename IT, typename UDER>
FullyDistSpVec<IT,VT> SpMV (const SpParMat<IT,bool,UDER> & A, const FullyDistSpVec<IT,VT> & x, OptBuf<int32_t, VT > & optbuf)
{
CheckSpMVCompliance(A,x);
optbuf.MarkEmpty();
MPI_Comm World = x.commGrid->GetWorld();
MPI_Comm ColWorld = x.commGrid->GetColWorld();
MPI_Comm RowWorld = x.commGrid->GetRowWorld();
int accnz;
int32_t trxlocnz;
IT lenuntil;
int32_t *trxinds, *indacc;
VT *trxnums, *numacc;
#ifdef TIMING
double t0=MPI_Wtime();
#endif
TransposeVector(World, x, trxlocnz, lenuntil, trxinds, trxnums, true); // trxinds (and potentially trxnums) is allocated
#ifdef TIMING
double t1=MPI_Wtime();
cblas_transvectime += (t1-t0);
#endif
AllGatherVector(ColWorld, trxlocnz, lenuntil, trxinds, trxnums, indacc, numacc, accnz, true); // trxinds (and potentially trxnums) is deallocated, indacc/numacc allocated
FullyDistSpVec<IT, VT> y ( x.commGrid, A.getnrow()); // identity doesn't matter for sparse vectors
int rowneighs; MPI_Comm_size(RowWorld,&rowneighs);
int * sendcnt = new int[rowneighs]();
LocalSpMV(A, rowneighs, optbuf, indacc, numacc, sendcnt, accnz); // indacc/numacc deallocated
int * rdispls = new int[rowneighs];
int * recvcnt = new int[rowneighs];
MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld); // share the request counts
// receive displacements are exact whereas send displacements have slack
rdispls[0] = 0;
for(int i=0; i<rowneighs-1; ++i)
{
rdispls[i+1] = rdispls[i] + recvcnt[i];
}
int totrecv = accumulate(recvcnt,recvcnt+rowneighs,0);
int32_t * recvindbuf = new int32_t[totrecv];
VT * recvnumbuf = new VT[totrecv];
#ifdef TIMING
double t2=MPI_Wtime();
#endif
//Commenting out the statement below to avoid the possibility
//of a deadlock in the alltoallv
//if(optbuf.totmax > 0 ) // graph500 optimization enabled
{
MPI_Alltoallv(optbuf.inds, sendcnt, optbuf.dspls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld);
MPI_Alltoallv(optbuf.nums, sendcnt, optbuf.dspls, MPIType<VT>(), recvnumbuf, recvcnt, rdispls, MPIType<VT>(), RowWorld);
delete [] sendcnt;
}
#ifdef TIMING
double t3=MPI_Wtime();
cblas_alltoalltime += (t3-t2);
#endif
MergeContributions(y,recvcnt, rdispls, recvindbuf, recvnumbuf, rowneighs);
return y;
}
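// Taken together, this SpMV is a fixed pipeline over the 2D process grid
// (a summary of the calls above, in order):
//
//   TransposeVector      send the local piece of x to the diagonal neighbor
//   AllGatherVector      assemble the column slab of x along ColWorld
//   LocalSpMV            multiply the local A with the slab (BFS semiring elided)
//   MPI_Alltoallv (x2)   scatter the (index, value) pairs along RowWorld
//   MergeContributions   merge the rowneighs sorted runs into the sparse y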
template <typename VT, typename IT, typename UDER>
SpDCCols<int,bool>::SpColIter* CalcSubStarts(SpParMat<IT,bool,UDER> & A, FullyDistSpVec<IT,VT> & x, BitMapCarousel<IT,VT> &done) {
shared_ptr<CommGrid> cg = A.getcommgrid();
IT rowuntil = x.LengthUntil();
MPI_Comm RowWorld = cg->GetRowWorld();
MPI_Bcast(&rowuntil, 1, MPIType<IT>(), 0, RowWorld);
int numcols = cg->GetGridCols();
SpDCCols<int,bool>::SpColIter colit = A.seq().begcol();
#ifdef THREADED
SpDCCols<int,bool>::SpColIter* starts = new SpDCCols<int,bool>::SpColIter[numcols*cblas_splits+1];
for(int c=0; c<numcols; c++) {
IT curr_sub_start = done.GetGlobalStartOfLocal(c) - rowuntil;
IT next_sub_start = done.GetGlobalEndOfLocal(c) - rowuntil;
IT sub_range = next_sub_start - curr_sub_start;
IT per_thread = (sub_range + cblas_splits - 1) / cblas_splits;
IT curr_thread_start = curr_sub_start;
for (int t=0; t<cblas_splits; t++) {
while(colit.colid() < curr_thread_start) {
++colit;
}
starts[c*cblas_splits + t] = colit;
curr_thread_start = min(curr_thread_start + per_thread, next_sub_start);
}
}
starts[numcols*cblas_splits] = A.seq().endcol();
#else
SpDCCols<int,bool>::SpColIter* starts = new SpDCCols<int,bool>::SpColIter[numcols+1];
for(int c=0; c<numcols; c++) {
IT next_start = done.GetGlobalStartOfLocal(c) - rowuntil;
while(colit.colid() < next_start) {
++colit;
}
starts[c] = colit;
}
starts[numcols] = A.seq().endcol();
#endif
return starts;
}
template <typename VT, typename IT>
void UpdateParents(MPI_Comm & RowWorld, pair<IT,IT> *updates, int num_updates, FullyDistVec<IT,VT> &parents, int source, int dest, BitMapFringe<int64_t,int64_t> &bm_fringe) {
int send_words = num_updates<<1, recv_words;
MPI_Status status;
MPI_Sendrecv(&send_words, 1, MPI_INT, dest, PUPSIZE,
&recv_words, 1, MPI_INT, source, PUPSIZE, RowWorld, &status);
pair<IT,IT>* recv_buff = new pair<IT,IT>[recv_words>>1];
MPI_Sendrecv(updates, send_words, MPIType<IT>(), dest, PUPDATA,
recv_buff, recv_words, MPIType<IT>(), source, PUPDATA, RowWorld, &status);
#ifdef THREADED
#pragma omp parallel for
#endif
for (int i=0; i<recv_words>>1; i++) {
parents.SetLocalElement(recv_buff[i].first, recv_buff[i].second);
}
bm_fringe.IncrementNumSet((recv_words>>1));
delete[] recv_buff;
}
template <typename VT, typename IT, typename UDER>
void BottomUpStep(SpParMat<IT,bool,UDER> & A, FullyDistSpVec<IT,VT> & x, BitMapFringe<int64_t,int64_t> &bm_fringe, FullyDistVec<IT,VT> & parents, BitMapCarousel<IT,VT> &done, SpDCCols<int,bool>::SpColIter* starts)
{
shared_ptr<CommGrid> cg = A.getcommgrid();
MPI_Comm World = cg->GetWorld();
MPI_Comm ColWorld = cg->GetColWorld();
MPI_Comm RowWorld = cg->GetRowWorld();
MPI_Status status;
// get row and column offsets
IT rowuntil = x.LengthUntil(), my_coluntil = x.LengthUntil(), coluntil;
int diagneigh = cg->GetComplementRank();
MPI_Sendrecv(&my_coluntil, 1, MPIType<IT>(), diagneigh, TROST, &coluntil, 1, MPIType<IT>(), diagneigh, TROST, World, &status);
MPI_Bcast(&coluntil, 1, MPIType<IT>(), 0, ColWorld);
MPI_Bcast(&rowuntil, 1, MPIType<IT>(), 0, RowWorld);
BitMap* frontier = bm_fringe.TransposeGather();
done.SaveOld();
#ifdef THREADED
const int buff_size = 8192;
pair<IT,IT>* local_update_heads[cblas_splits];
for (int t=0; t<cblas_splits; t++)
local_update_heads[t] = new pair<IT,IT>[buff_size];
#endif
// do bottom up work
int numcols = cg->GetGridCols();
int mycol = cg->GetRankInProcRow();
pair<IT,IT>* parent_updates = new pair<IT,IT>[done.SizeOfChunk()<<1]; // over-allocated
for (int sub_step=0; sub_step<numcols; sub_step++) {
int num_updates = 0;
IT sub_start = done.GetGlobalStartOfLocal();
int dest_slice = (mycol + sub_step) % numcols;
int source_slice = (mycol - sub_step + numcols) % numcols;
double t1 = MPI_Wtime();
#ifdef THREADED
#pragma omp parallel
{
int id = omp_get_thread_num();
int num_locals=0;
SpDCCols<int,bool>::SpColIter::NzIter nzit, nzit_end;
SpDCCols<int,bool>::SpColIter colit, colit_end;
pair<IT,IT>* local_updates = local_update_heads[id];
// vector<pair<IT,IT> > local_updates;
colit_end = starts[dest_slice*cblas_splits + id + 1];
for(colit = starts[dest_slice*cblas_splits + id]; colit != colit_end; ++colit) {
int32_t local_row_ind = colit.colid();
IT row = local_row_ind + rowuntil;
if (!done.GetBit(row)) {
nzit_end = A.seq().endnz(colit);
for(nzit = A.seq().begnz(colit); nzit != nzit_end; ++nzit) {
int32_t local_col_ind = nzit.rowid();
IT col = local_col_ind + coluntil;
if (frontier->get_bit(local_col_ind)) {
// local_updates.push_back(make_pair(row-sub_start, col));
if (num_locals == buff_size) {
int copy_start = __sync_fetch_and_add(&num_updates, buff_size);
copy(local_updates, local_updates + buff_size, parent_updates + copy_start);
num_locals = 0;
}
local_updates[num_locals++] = make_pair(row-sub_start, col);
done.SetBit(row);
break;
}
}
}
}
int copy_start = __sync_fetch_and_add(&num_updates, num_locals);
copy(local_updates, local_updates + num_locals, parent_updates + copy_start);
}
#else
SpDCCols<int,bool>::SpColIter::NzIter nzit, nzit_end;
SpDCCols<int,bool>::SpColIter colit, colit_end;
colit_end = starts[dest_slice+1];
for(colit = starts[dest_slice]; colit != colit_end; ++colit)
{
int32_t local_row_ind = colit.colid();
IT row = local_row_ind + rowuntil;
if (!done.GetBit(row))
{
nzit_end = A.seq().endnz(colit);
for(nzit = A.seq().begnz(colit); nzit != nzit_end; ++nzit)
{
int32_t local_col_ind = nzit.rowid();
IT col = local_col_ind + coluntil;
if (frontier->get_bit(local_col_ind))
{
parent_updates[num_updates++] = make_pair(row-sub_start, col);
done.SetBit(row);
break;
}
} // end_for
} // end_if
} // end_for
#endif
#ifdef BOTTOMUPTIME
double t2 = MPI_Wtime();
bu_local += (t2-t1);
t1 = MPI_Wtime();
#endif
done.RotateAlongRow();
#ifdef BOTTOMUPTIME
t2 = MPI_Wtime();
bu_rotate += (t2-t1);
t1 = MPI_Wtime();
#endif
UpdateParents(RowWorld, parent_updates, num_updates, parents, source_slice, dest_slice, bm_fringe);
#ifdef BOTTOMUPTIME
t2 = MPI_Wtime();
bu_update += (t2-t1);
#endif
}
bm_fringe.LoadFromNext();
done.UpdateFringe(bm_fringe);
#ifdef THREADED
for (int t=0; t<cblas_splits; t++)
delete[] local_update_heads[t];
#endif
delete[] parent_updates;
}
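// The 2D decomposition and carousel rotation above implement, per process, the
// classic bottom-up BFS step. Its serial core (a sketch with hypothetical
// neighbors()/parent[] helpers, not code from this file) is:
//
//   for (IT v = 0; v < n; ++v) {
//       if (done.GetBit(v)) continue;        // v already has a parent
//       for (IT u : neighbors(v)) {          // scan v's adjacency
//           if (frontier->get_bit(u)) {      // any neighbor in the frontier?
//               parent[v] = u;               // claim the first one found
//               done.SetBit(v);
//               break;                       // stop scanning early
//           }
//       }
//   }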
#endif
|
SparseTranspose.h | /**
* This file contains (modified) code from the Eigen library.
* Eigen License:
*
* Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
* Copyright (C) 2007-2011 Benoit Jacob <jacob.benoit.1@gmail.com>
*
* This Source Code Form is subject to the terms of the Mozilla
* Public License v. 2.0. If a copy of the MPL was not distributed
* with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
*
* ======================
*
* The modifications are part of the Eigen Recursive Matrix Extension (ERME).
* ERME License:
*
* Copyright (c) 2019 Darius Rückert
* Licensed under the MIT License.
*/
#pragma once
#include "SparseHelper.h"
#include "Transpose.h"
#include <iostream>
namespace Eigen::Recursive
{
/**
* Sparse Matrix Transposition.
* This is basically a copy and paste from Eigen/src/SparseCore/SparseMatrix.h :: operator=
*
 * The only difference is that we call transpose recursively on each element when assigning them.
*
 * There are also two additional methods that only transpose the structure or the values.
 * This is used for optimization problems with well-known sparsity structures. In these cases
 * the structure can be precomputed.
*
*/
template <typename G, typename H, int options>
void transpose(const Eigen::SparseMatrix<G, options>& other, Eigen::SparseMatrix<H, options>& dest)
{
static_assert(options == Eigen::RowMajor, "todo");
using SparseMatrix = Eigen::SparseMatrix<G, Eigen::RowMajor>;
using namespace Eigen;
// SparseMatrix dest(other.rows(),other.cols());
// dest.resize(other.rows(), other.cols());
dest.resize(other.cols(), other.rows());
Eigen::Map<typename SparseMatrix::IndexVector>(dest.outerIndexPtr(), dest.outerSize()).setZero();
// pass 1
// FIXME the above copy could be merged with that pass
for (Index j = 0; j < other.outerSize(); ++j)
for (typename SparseMatrix::InnerIterator it(other, j); it; ++it) ++dest.outerIndexPtr()[it.index()];
// prefix sum
Index count = 0;
typename SparseMatrix::IndexVector positions(dest.outerSize());
for (Index j = 0; j < dest.outerSize(); ++j)
{
auto tmp = dest.outerIndexPtr()[j];
dest.outerIndexPtr()[j] = count;
positions[j] = count;
count += tmp;
}
dest.outerIndexPtr()[dest.outerSize()] = count;
// alloc
// dest.m_data.resize(count);
dest.reserve(count);
// pass 2
for (Index j = 0; j < other.outerSize(); ++j)
{
for (typename SparseMatrix::InnerIterator it(other, j); it; ++it)
{
Index pos = positions[it.index()]++;
dest.innerIndexPtr()[pos] = j;
dest.valuePtr()[pos].get() = transpose(it.value()).get();
}
}
}
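// The two passes above implement a counting sort on the inner index. A
// self-contained sketch of the same pattern on a plain scalar CSR matrix
// (illustration only; the function above additionally calls transpose()
// recursively on each block element):
//
//   #include <vector>
//   struct Csr { int rows, cols; std::vector<int> ptr, idx; std::vector<double> val; };
//
//   Csr transposeCsr(const Csr& a)
//   {
//       Csr t{a.cols, a.rows,
//             std::vector<int>(a.cols + 1, 0),
//             std::vector<int>(a.idx.size()),
//             std::vector<double>(a.val.size())};
//       for (int k : a.idx) ++t.ptr[k + 1];                          // pass 1: count per column
//       for (int j = 0; j < a.cols; ++j) t.ptr[j + 1] += t.ptr[j];   // prefix sum -> offsets
//       std::vector<int> pos(t.ptr.begin(), t.ptr.end() - 1);        // insertion cursors
//       for (int i = 0; i < a.rows; ++i)
//           for (int p = a.ptr[i]; p < a.ptr[i + 1]; ++p) {
//               int q = pos[a.idx[p]]++;                             // pass 2: scatter
//               t.idx[q] = i;
//               t.val[q] = a.val[p];
//           }
//       return t;
//   }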
template <typename G, typename H, int options>
void transposeStructureOnly(const Eigen::SparseMatrix<G, options>& other, Eigen::SparseMatrix<H, options>& dest)
{
static_assert(options == Eigen::RowMajor, "todo");
using SparseMatrix = Eigen::SparseMatrix<G, Eigen::RowMajor>;
using namespace Eigen;
// SparseMatrix dest(other.rows(),other.cols());
dest.resize(other.cols(), other.rows());
Eigen::Map<typename SparseMatrix::IndexVector>(dest.outerIndexPtr(), dest.outerSize()).setZero();
// pass 1
// FIXME the above copy could be merged with that pass
for (Index j = 0; j < other.outerSize(); ++j)
for (typename SparseMatrix::InnerIterator it(other, j); it; ++it) ++dest.outerIndexPtr()[it.index()];
// prefix sum
Index count = 0;
typename SparseMatrix::IndexVector positions(dest.outerSize());
for (Index j = 0; j < dest.outerSize(); ++j)
{
auto tmp = dest.outerIndexPtr()[j];
dest.outerIndexPtr()[j] = count;
positions[j] = count;
count += tmp;
}
dest.outerIndexPtr()[dest.outerSize()] = count;
// alloc
dest.reserve(count);
// pass 2
for (Index j = 0; j < other.outerSize(); ++j)
{
for (typename SparseMatrix::InnerIterator it(other, j); it; ++it)
{
Index pos = positions[it.index()]++;
dest.innerIndexPtr()[pos] = j;
}
}
}
template <typename G, typename H, int options>
void transposeStructureOnly_omp(const Eigen::SparseMatrix<G, options>& other, Eigen::SparseMatrix<H, options>& dest,
std::vector<int>& transposeTargets)
{
static_assert(options == Eigen::RowMajor, "todo");
using SparseMatrix = Eigen::SparseMatrix<G, Eigen::RowMajor>;
using namespace Eigen;
// SparseMatrix dest(other.rows(),other.cols());
dest.resize(other.cols(), other.rows());
Eigen::Map<typename SparseMatrix::IndexVector>(dest.outerIndexPtr(), dest.outerSize()).setZero();
// pass 1
// FIXME the above copy could be merged with that pass
for (Index j = 0; j < other.outerSize(); ++j)
for (typename SparseMatrix::InnerIterator it(other, j); it; ++it) ++dest.outerIndexPtr()[it.index()];
// prefix sum
Index count = 0;
typename SparseMatrix::IndexVector positions(dest.outerSize());
for (Index j = 0; j < dest.outerSize(); ++j)
{
auto tmp = dest.outerIndexPtr()[j];
dest.outerIndexPtr()[j] = count;
positions[j] = count;
count += tmp;
}
dest.outerIndexPtr()[dest.outerSize()] = count;
// alloc
dest.reserve(count);
transposeTargets.resize(count);
// pass 2
for (Index j = 0; j < other.outerSize(); ++j)
{
int op = other.outerIndexPtr()[j];
int i = 0;
for (typename SparseMatrix::InnerIterator it(other, j); it; ++it, ++i)
{
int rel = op + i;
Index pos = positions[it.index()]++;
transposeTargets[rel] = pos;
dest.innerIndexPtr()[pos] = j;
}
}
}
template <typename G, typename H, int options>
void transposeValueOnly(const Eigen::SparseMatrix<G, options>& other, Eigen::SparseMatrix<H, options>& dest)
{
static_assert(options == Eigen::RowMajor, "todo");
using SparseMatrix = Eigen::SparseMatrix<G, Eigen::RowMajor>;
using namespace Eigen;
std::vector<int> positions(dest.outerSize(), 0);
for (Index j = 0; j < other.outerSize(); ++j)
{
for (typename SparseMatrix::InnerIterator it(other, j); it; ++it)
{
Index pos = dest.outerIndexPtr()[it.index()] + positions[it.index()]++;
dest.valuePtr()[pos].get() = transpose(it.value()).get();
}
}
}
template <typename G, typename H, int options>
void transposeValueOnly_omp(const Eigen::SparseMatrix<G, options>& other, Eigen::SparseMatrix<H, options>& dest,
const std::vector<int>& transposeTargets)
{
static_assert(options == Eigen::RowMajor, "todo");
using SparseMatrix = Eigen::SparseMatrix<G, Eigen::RowMajor>;
using namespace Eigen;
// std::vector<int> positions(dest.outerSize(), 0);
#pragma omp for
for (Index j = 0; j < other.outerSize(); ++j)
{
int op = other.outerIndexPtr()[j];
int i = 0;
for (typename SparseMatrix::InnerIterator it(other, j); it; ++it, ++i)
{
int rel = op + i;
int pos = transposeTargets[rel];
// Index pos = dest.outerIndexPtr()[it.index()] + positions[it.index()]++;
dest.valuePtr()[pos].get() = transpose(it.value()).get();
}
}
}
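// Intended split usage (a sketch; updateValues() is a hypothetical stand-in
// for whatever refills the nonzeros of S between solver iterations):
//
//   Eigen::SparseMatrix<G, Eigen::RowMajor> S = /* pattern fixed */;
//   Eigen::SparseMatrix<H, Eigen::RowMajor> St;
//   std::vector<int> targets;
//   transposeStructureOnly_omp(S, St, targets);   // once: pattern + targets
//   for (int it = 0; it < maxIterations; ++it) {
//       updateValues(S);                          // values change, pattern doesn't
//   #pragma omp parallel
//       transposeValueOnly_omp(S, St, targets);   // cheap per-iteration refresh
//   }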
} // namespace Eigen::Recursive
|
GB_binop__rminus_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_int64)
// A*D function (colscale): GB (_AxD__rminus_int64)
// D*A function (rowscale): GB (_DxB__rminus_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_int64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_int64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_int64)
// C=scalar+B GB (_bind1st__rminus_int64)
// C=scalar+B' GB (_bind1st_tran__rminus_int64)
// C=A+scalar GB (_bind2nd__rminus_int64)
// C=A'+scalar GB (_bind2nd_tran__rminus_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = (bij - aij)
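// Note: RMINUS is MINUS with its operands flipped, z = y - x, so an entry with
// aij = 3 and bij = 10 yields cij = 10 - 3 = 7 (plain MINUS would give -7).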
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y - x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_INT64 || GxB_NO_RMINUS_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rminus_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__rminus_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rminus_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rminus_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rminus_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rminus_int64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rminus_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rminus_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rminus_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rminus_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rminus_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rminus_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = (bij - x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rminus_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = (y - aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
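// bind1st and bind2nd fix one operand of z = rminus(x,y) = y - x to a scalar;
// a quick numeric check of the two kernels above (illustrative):
//
//   bind1st, x = 5:  Bx = {1, 2, 3}  ->  Cx = {1-5, 2-5, 3-5} = {-4, -3, -2}
//   bind2nd, y = 5:  Ax = {1, 2, 3}  ->  Cx = {5-1, 5-2, 5-3} = { 4,  3,  2}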
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - x) ; \
}
GrB_Info GB (_bind1st_tran__rminus_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y - aij) ; \
}
GrB_Info GB (_bind2nd_tran__rminus_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__lnot_uint16_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint16_int16
// op(A') function: GB_tran__lnot_uint16_int16
// C type: uint16_t
// A type: int16_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint16_int16
(
uint16_t *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
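// Worked example for the kernel above: the cast runs before the logical not,
// so for Ax = {-3, 0, 7} (int16_t) the cast gives x = {65533, 0, 7} (uint16_t)
// and Cx = !(x != 0) = {0, 1, 0}.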
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint16_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
integrate_padding_omp.c | #include <stdio.h>
#include <omp.h>
static long num_steps = 1000000000;
double step;
#define NUM_THREADS 2
#define PAD 8
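/*
 * Padding rationale (a sketch of the arithmetic, assuming 64-byte cache
 * lines and 8-byte doubles): without padding, sum[0] and sum[1] are adjacent
 * doubles on the same cache line, so the two threads invalidate each other's
 * copy on every update (false sharing). With PAD = 8, each row sum[ID] spans
 * 8 * sizeof(double) = 64 bytes, so sum[0][0] and sum[1][0] land on different
 * cache lines and the threads accumulate independently.
 */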
int main()
{
int i, nThreads;
double pi, sum[NUM_THREADS][PAD];
step = 1.0/(double) num_steps;
omp_set_num_threads(NUM_THREADS);
#pragma omp parallel
{
int i, ID, nThreadsInternal;
double x;
ID = omp_get_thread_num();
nThreadsInternal = omp_get_num_threads();
if(ID == 0) nThreads = nThreadsInternal;
for( i=ID, sum[ID][0]=0.0; i<num_steps; i=i+nThreadsInternal) {
x = (i+0.5)*step;
sum[ID][0] += 4.0/(1.0 + x*x);
}
}
for (i=0, pi=0.0; i<nThreads; i++)
pi += sum[i][0]*step;
printf("%f\n", pi);
}
|
SlicedLockBasedTraversal.h | /**
* @file SlicedLockBasedTraversal.h
*
* @date 09 Jan 2019
* @author seckler
*/
#pragma once
#include <numeric>
#include "autopas/containers/cellPairTraversals/SlicedBasedTraversal.h"
#include "autopas/utils/ThreeDimensionalMapping.h"
#include "autopas/utils/Timer.h"
#include "autopas/utils/WrapOpenMP.h"
namespace autopas {
/**
* This class provides the sliced traversal.
*
* The traversal finds the longest dimension of the simulation domain and cuts
* the domain into multiple slices along this dimension. Slices are
 * assigned to the threads in a round-robin fashion. Each thread locks the cells
 * on the boundary wall to the previous slice with one lock. This lock is lifted
 * as soon as the boundary wall is fully processed.
*
* @tparam ParticleCell The type of cells.
* @tparam PairwiseFunctor The functor that defines the interaction of two particles.
* @tparam dataLayout
* @tparam useNewton3
 * @tparam spaciallyForward Whether the base step only covers neighboring cells that are spatially forward (for example
 * c08)
*/
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3,
bool spaciallyForward>
class SlicedLockBasedTraversal
: public SlicedBasedTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3, spaciallyForward> {
public:
/**
* Constructor of the sliced traversal.
* @param dims The dimensions of the cellblock, i.e. the number of cells in x,
* y and z direction.
* @param pairwiseFunctor The functor that defines the interaction of two particles.
* @param interactionLength Interaction length (cutoff + skin).
* @param cellLength cell length.
*/
explicit SlicedLockBasedTraversal(const std::array<unsigned long, 3> &dims, PairwiseFunctor *pairwiseFunctor,
const double interactionLength, const std::array<double, 3> &cellLength)
: SlicedBasedTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3, spaciallyForward>(
dims, pairwiseFunctor, interactionLength, cellLength) {}
protected:
/**
 * Whether to use static or dynamic scheduling.
*/
bool _dynamic = true;
/**
* The main traversal of the sliced traversal.
*
* @copydetails C01BasedTraversal::c01Traversal()
*
*/
template <typename LoopBody>
inline void slicedTraversal(LoopBody &&loopBody);
};
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3,
bool spaciallyForward>
template <typename LoopBody>
void SlicedLockBasedTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3, spaciallyForward>::slicedTraversal(
LoopBody &&loopBody) {
using std::array;
auto numSlices = this->_sliceThickness.size();
std::vector<AutoPasLock> locks;
locks.resize((numSlices - 1) * this->_overlapLongestAxis);
// 0) check if applicable
std::array<size_t, 2> overLapps23{this->_overlap[this->_dimsPerLength[1]], this->_overlap[this->_dimsPerLength[2]]};
if (not spaciallyForward) {
overLapps23 = {0ul, 0ul};
}
std::vector<utils::Timer> timers;
std::vector<double> threadTimes;
timers.resize(numSlices);
threadTimes.resize(numSlices);
#ifdef AUTOPAS_OPENMP
// although every thread gets exactly one iteration (=slice), this is faster than a normal parallel region
auto numThreads = static_cast<size_t>(autopas_get_max_threads());
if (this->_dynamic) {
omp_set_schedule(omp_sched_dynamic, 1);
} else {
omp_set_schedule(omp_sched_static, 1);
}
#pragma omp parallel for schedule(runtime) num_threads(numThreads)
#endif
for (size_t slice = 0; slice < numSlices; ++slice) {
timers[slice].start();
array<unsigned long, 3> myStartArray{0, 0, 0};
for (size_t i = 0; i < slice; ++i) {
myStartArray[this->_dimsPerLength[0]] += this->_sliceThickness[i];
}
// all but the first slice need to lock their starting layers.
const unsigned long lockBaseIndex = (slice - 1) * this->_overlapLongestAxis;
if (slice > 0) {
for (unsigned long i = 0ul; i < this->_overlapLongestAxis; i++) {
locks[lockBaseIndex + i].lock();
}
}
const auto lastLayer = myStartArray[this->_dimsPerLength[0]] + this->_sliceThickness[slice];
for (unsigned long sliceOffset = 0ul; sliceOffset < this->_sliceThickness[slice]; ++sliceOffset) {
const auto dimSlice = myStartArray[this->_dimsPerLength[0]] + sliceOffset;
// at the last layers request lock for the starting layer of the next
// slice. Does not apply for the last slice.
if (slice != numSlices - 1 and dimSlice >= lastLayer - this->_overlapLongestAxis) {
locks[((slice + 1) * this->_overlapLongestAxis) - (lastLayer - dimSlice)].lock();
}
for (unsigned long dimMedium = 0; dimMedium < this->_cellsPerDimension[this->_dimsPerLength[1]] - overLapps23[0];
++dimMedium) {
for (unsigned long dimShort = 0; dimShort < this->_cellsPerDimension[this->_dimsPerLength[2]] - overLapps23[1];
++dimShort) {
array<unsigned long, 3> idArray = {};
idArray[this->_dimsPerLength[0]] = dimSlice;
idArray[this->_dimsPerLength[1]] = dimMedium;
idArray[this->_dimsPerLength[2]] = dimShort;
loopBody(idArray[0], idArray[1], idArray[2]);
}
}
// at the end of the first layers release the lock
if (slice > 0 and dimSlice < myStartArray[this->_dimsPerLength[0]] + this->_overlapLongestAxis) {
locks[lockBaseIndex + sliceOffset].unlock();
// if lastLayer is reached within overlap area, unlock all following locks
// this should never be the case if slice thicknesses are set up properly; thickness should always be
// greater than the overlap along the longest axis, or the slices won't be processed in parallel.
if (dimSlice == lastLayer - 1) {
for (unsigned long i = sliceOffset + 1; i < this->_overlapLongestAxis; ++i) {
locks[lockBaseIndex + i].unlock();
}
}
} else if (slice != numSlices - 1 and dimSlice == lastLayer - 1) {
// clearing of the locks set on the last layers of each slice
for (size_t i = (slice * this->_overlapLongestAxis); i < (slice + 1) * this->_overlapLongestAxis; ++i) {
locks[i].unlock();
}
}
}
threadTimes[slice] = timers[slice].stop();
}
std::string timesStr;
for (auto t : threadTimes) {
timesStr += std::to_string(t) + ", ";
}
auto minMax = std::minmax_element(threadTimes.begin(), threadTimes.end());
auto avg = std::accumulate(threadTimes.begin(), threadTimes.end(), 0.0) / numSlices;
auto variance = std::accumulate(threadTimes.cbegin(), threadTimes.cend(), 0.0,
[avg](double a, double b) -> double { return a + std::pow(avg - b, 2.0); }) /
numSlices;
auto stddev = std::sqrt(variance);
AutoPasLog(debug, "times per slice: [{}].", timesStr);
AutoPasLog(debug, "Difference between longest and shortest time: {:.3G}", *minMax.second - *minMax.first);
AutoPasLog(debug, "Ratio between longest and shortest time: {:.3G}", (float)*minMax.second / *minMax.first);
AutoPasLog(debug, "avg: {:.3G}, std-deviation: {:.3G} ({:.3G}%)", avg, stddev, 100 * stddev / avg);
}
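// Lock layout used above (a sketch for numSlices = 3 and an overlap of 2
// layers along the longest axis): locks.size() = (3 - 1) * 2 = 4, where
// locks[0..1] guard the first two layers of slice 1 and locks[2..3] guard the
// first two layers of slice 2. Slice s > 0 starts by taking its own locks
// [(s-1)*overlap, s*overlap) and releases each one as soon as that boundary
// layer is processed; while processing its last overlap layers it takes the
// next slice's locks, which it releases once its final layer is done.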
} // namespace autopas
|
nditer_api.c | /*
* This file implements most of the main API functions of NumPy's nditer.
* This excludes functions specialized using the templating system.
*
* Copyright (c) 2010-2011 by Mark Wiebe (mwwiebe@gmail.com)
* The University of British Columbia
*
* Copyright (c) 2011 Enthought, Inc
*
* See LICENSE.txt for the license.
*/
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#include <offload.h>
/* Indicate that this .c file is allowed to include the header */
#define MPY_ITERATOR_IMPLEMENTATION_CODE
#include "nditer_impl.h"
#include "templ_common.h"
#include <numpy/npy_common.h>
#include "mpy_common.h"
#include "common.h"
#include "nditer.h"
#include "creators.h"
/* Internal helper functions private to this file */
static npy_intp
npyiter_checkreducesize(MpyIter *iter, npy_intp count,
npy_intp *reduce_innersize,
npy_intp *reduce_outerdim);
/*NUMPY_API
* Removes an axis from iteration. This requires that NPY_ITER_MULTI_INDEX
* was set for iterator creation, and does not work if buffering is
* enabled. This function also resets the iterator to its initial state.
*
* Returns NPY_SUCCEED or NPY_FAIL.
*/
NPY_NO_EXPORT int
MpyIter_RemoveAxis(MpyIter *iter, int axis)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
int iop, nop = NIT_NOP(iter);
int xdim = 0;
npy_int8 *perm = NIT_PERM(iter);
NpyIter_AxisData *axisdata_del = NIT_AXISDATA(iter), *axisdata;
npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
npy_intp *baseoffsets = NIT_BASEOFFSETS(iter);
char **resetdataptr = NIT_RESETDATAPTR(iter);
if (!(itflags&NPY_ITFLAG_HASMULTIINDEX)) {
PyErr_SetString(PyExc_RuntimeError,
"Iterator RemoveAxis may only be called "
"if a multi-index is being tracked");
return NPY_FAIL;
}
else if (itflags&NPY_ITFLAG_HASINDEX) {
PyErr_SetString(PyExc_RuntimeError,
"Iterator RemoveAxis may not be called on "
"an index is being tracked");
return NPY_FAIL;
}
else if (itflags&NPY_ITFLAG_BUFFER) {
PyErr_SetString(PyExc_RuntimeError,
"Iterator RemoveAxis may not be called on "
"a buffered iterator");
return NPY_FAIL;
}
else if (axis < 0 || axis >= ndim) {
PyErr_SetString(PyExc_ValueError,
"axis out of bounds in iterator RemoveAxis");
return NPY_FAIL;
}
/* Reverse axis, since the iterator treats them that way */
axis = ndim - 1 - axis;
/* First find the axis in question */
for (idim = 0; idim < ndim; ++idim) {
/* If this is it, and it's iterated forward, done */
if (perm[idim] == axis) {
xdim = idim;
break;
}
/* If this is it, but it's iterated backward, must reverse the axis */
else if (-1 - perm[idim] == axis) {
npy_intp *strides = NAD_STRIDES(axisdata_del);
npy_intp shape = NAD_SHAPE(axisdata_del), offset;
xdim = idim;
/*
* Adjust baseoffsets and resetbaseptr back to the start of
* this axis.
*/
for (iop = 0; iop < nop; ++iop) {
offset = (shape-1)*strides[iop];
baseoffsets[iop] += offset;
resetdataptr[iop] += offset;
}
break;
}
NIT_ADVANCE_AXISDATA(axisdata_del, 1);
}
if (idim == ndim) {
PyErr_SetString(PyExc_RuntimeError,
"internal error in iterator perm");
return NPY_FAIL;
}
/* Adjust the permutation */
for (idim = 0; idim < ndim-1; ++idim) {
npy_int8 p = (idim < xdim) ? perm[idim] : perm[idim+1];
if (p >= 0) {
if (p > axis) {
--p;
}
}
else if (p <= 0) {
if (p < -1-axis) {
++p;
}
}
perm[idim] = p;
}
/* Shift all the axisdata structures by one */
axisdata = NIT_INDEX_AXISDATA(axisdata_del, 1);
memmove(axisdata_del, axisdata, (ndim-1-xdim)*sizeof_axisdata);
/* Adjust the iteration size and reset iterend */
NIT_ITERSIZE(iter) = 1;
axisdata = NIT_AXISDATA(iter);
for (idim = 0; idim < ndim-1; ++idim) {
if (npy_mul_with_overflow_intp(&NIT_ITERSIZE(iter),
NIT_ITERSIZE(iter), NAD_SHAPE(axisdata))) {
NIT_ITERSIZE(iter) = -1;
break;
}
NIT_ADVANCE_AXISDATA(axisdata, 1);
}
NIT_ITEREND(iter) = NIT_ITERSIZE(iter);
/* Shrink the iterator */
NIT_NDIM(iter) = ndim - 1;
/* If it is now 0-d fill the singleton dimension */
if (ndim == 1) {
npy_intp *strides = NAD_STRIDES(axisdata_del);
NAD_SHAPE(axisdata_del) = 1;
for (iop = 0; iop < nop; ++iop) {
strides[iop] = 0;
}
NIT_ITFLAGS(iter) |= NPY_ITFLAG_ONEITERATION;
}
return MpyIter_Reset(iter, NULL);
}
/*NUMPY_API
* Removes multi-index support from an iterator.
*
* Returns NPY_SUCCEED or NPY_FAIL.
*/
NPY_NO_EXPORT int
MpyIter_RemoveMultiIndex(MpyIter *iter)
{
npy_uint32 itflags;
/* Make sure the iterator is reset */
if (MpyIter_Reset(iter, NULL) != NPY_SUCCEED) {
return NPY_FAIL;
}
itflags = NIT_ITFLAGS(iter);
if (itflags&NPY_ITFLAG_HASMULTIINDEX) {
if (NIT_ITERSIZE(iter) < 0) {
PyErr_SetString(PyExc_ValueError, "iterator is too large");
return NPY_FAIL;
}
NIT_ITFLAGS(iter) = itflags & ~NPY_ITFLAG_HASMULTIINDEX;
mpyiter_coalesce_axes(iter);
}
mpyiter_update_offiter(iter);
return NPY_SUCCEED;
}
/*NUMPY_API
* Removes the inner loop handling (so HasExternalLoop returns true)
*/
NPY_NO_EXPORT int
MpyIter_EnableExternalLoop(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
/*int ndim = NIT_NDIM(iter);*/
int nop = NIT_NOP(iter);
/* Check conditions under which this can be done */
if (itflags&(NPY_ITFLAG_HASINDEX|NPY_ITFLAG_HASMULTIINDEX)) {
PyErr_SetString(PyExc_ValueError,
"Iterator flag EXTERNAL_LOOP cannot be used "
"if an index or multi-index is being tracked");
return NPY_FAIL;
}
if ((itflags&(NPY_ITFLAG_BUFFER|NPY_ITFLAG_RANGE|NPY_ITFLAG_EXLOOP))
== (NPY_ITFLAG_RANGE|NPY_ITFLAG_EXLOOP)) {
PyErr_SetString(PyExc_ValueError,
"Iterator flag EXTERNAL_LOOP cannot be used "
"with ranged iteration unless buffering is also enabled");
return NPY_FAIL;
}
/* Set the flag */
if (!(itflags&NPY_ITFLAG_EXLOOP)) {
itflags |= NPY_ITFLAG_EXLOOP;
NIT_ITFLAGS(iter) = itflags;
/*
* Check whether we can apply the single iteration
* optimization to the iternext function.
*/
if (!(itflags&NPY_ITFLAG_BUFFER)) {
NpyIter_AxisData *axisdata = NIT_AXISDATA(iter);
if (NIT_ITERSIZE(iter) == NAD_SHAPE(axisdata)) {
NIT_ITFLAGS(iter) |= NPY_ITFLAG_ONEITERATION;
}
}
}
/* Reset the iterator */
return MpyIter_Reset(iter, NULL);
}
/*NUMPY_API
* Resets the iterator to its initial state
*
* If errmsg is non-NULL, it should point to a variable which will
* receive the error message, and no Python exception will be set.
* This is so that the function can be called from code not holding
* the GIL.
*/
NPY_NO_EXPORT int
MpyIter_Reset(MpyIter *iter, char **errmsg)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
/*int ndim = NIT_NDIM(iter);*/
int nop = NIT_NOP(iter);
if (itflags&NPY_ITFLAG_BUFFER) {
NpyIter_BufferData *bufferdata;
/* If buffer allocation was delayed, do it now */
if (itflags&NPY_ITFLAG_DELAYBUF) {
if (!mpyiter_allocate_buffers(iter, errmsg)) {
return NPY_FAIL;
}
NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_DELAYBUF;
}
else {
/*
* If the iterindex is already right, no need to
* do anything
*/
bufferdata = NIT_BUFFERDATA(iter);
if (NIT_ITERINDEX(iter) == NIT_ITERSTART(iter) &&
NBF_BUFITEREND(bufferdata) <= NIT_ITEREND(iter) &&
NBF_SIZE(bufferdata) > 0) {
return NPY_SUCCEED;
}
/* Copy any data from the buffers back to the arrays */
mpyiter_copy_from_buffers(iter);
}
}
mpyiter_goto_iterindex(iter, NIT_ITERSTART(iter));
if (itflags&NPY_ITFLAG_BUFFER) {
/* Prepare the next buffers and set iterend/size */
mpyiter_copy_to_buffers(iter, NULL);
}
/* Update offload iter */
mpyiter_update_offiter(iter);
return NPY_SUCCEED;
}
/*NUMPY_API
* Resets the iterator to its initial state, with new base data pointers.
* This function requires great caution.
*
* If errmsg is non-NULL, it should point to a variable which will
* receive the error message, and no Python exception will be set.
* This is so that the function can be called from code not holding
* the GIL.
*/
NPY_NO_EXPORT int
MpyIter_ResetBasePointers(MpyIter *iter, char **baseptrs, char **errmsg)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
/*int ndim = NIT_NDIM(iter);*/
int iop, nop = NIT_NOP(iter);
char **resetdataptr = NIT_RESETDATAPTR(iter);
npy_intp *baseoffsets = NIT_BASEOFFSETS(iter);
if (itflags&NPY_ITFLAG_BUFFER) {
/* If buffer allocation was delayed, do it now */
if (itflags&NPY_ITFLAG_DELAYBUF) {
if (!mpyiter_allocate_buffers(iter, errmsg)) {
return NPY_FAIL;
}
NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_DELAYBUF;
}
else {
/* Copy any data from the buffers back to the arrays */
mpyiter_copy_from_buffers(iter);
}
}
/* The new data pointers for resetting */
for (iop = 0; iop < nop; ++iop) {
resetdataptr[iop] = baseptrs[iop] + baseoffsets[iop];
}
mpyiter_goto_iterindex(iter, NIT_ITERSTART(iter));
if (itflags&NPY_ITFLAG_BUFFER) {
/* Prepare the next buffers and set iterend/size */
mpyiter_copy_to_buffers(iter, NULL);
}
/* Update offload iter */
mpyiter_update_offiter(iter);
return NPY_SUCCEED;
}
/*NUMPY_API
* Resets the iterator to a new iterator index range
*
* If errmsg is non-NULL, it should point to a variable which will
* receive the error message, and no Python exception will be set.
* This is so that the function can be called from code not holding
* the GIL.
*/
NPY_NO_EXPORT int
MpyIter_ResetToIterIndexRange(MpyIter *iter,
npy_intp istart, npy_intp iend, char **errmsg)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
/*int ndim = NIT_NDIM(iter);*/
/*int nop = NIT_NOP(iter);*/
if (!(itflags&NPY_ITFLAG_RANGE)) {
if (errmsg == NULL) {
PyErr_SetString(PyExc_ValueError,
"Cannot call ResetToIterIndexRange on an iterator without "
"requesting ranged iteration support in the constructor");
}
else {
*errmsg = "Cannot call ResetToIterIndexRange on an iterator "
"without requesting ranged iteration support in the "
"constructor";
}
return NPY_FAIL;
}
if (istart < 0 || iend > NIT_ITERSIZE(iter)) {
if (NIT_ITERSIZE(iter) < 0) {
if (errmsg == NULL) {
PyErr_SetString(PyExc_ValueError, "iterator is too large");
}
else {
*errmsg = "iterator is too large";
}
return NPY_FAIL;
}
if (errmsg == NULL) {
PyErr_Format(PyExc_ValueError,
"Out-of-bounds range [%d, %d) passed to "
"ResetToIterIndexRange", (int)istart, (int)iend);
}
else {
*errmsg = "Out-of-bounds range passed to ResetToIterIndexRange";
}
return NPY_FAIL;
}
else if (iend < istart) {
if (errmsg == NULL) {
PyErr_Format(PyExc_ValueError,
"Invalid range [%d, %d) passed to ResetToIterIndexRange",
(int)istart, (int)iend);
}
else {
*errmsg = "Invalid range passed to ResetToIterIndexRange";
}
return NPY_FAIL;
}
NIT_ITERSTART(iter) = istart;
NIT_ITEREND(iter) = iend;
return MpyIter_Reset(iter, errmsg);
}
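/*
 * Typical use of ranged resets is to split one iteration among threads
 * (a sketch; it assumes this Mpy API mirrors NumPy's NpyIter, i.e. that
 * MpyIter_GetIterSize and per-thread iterator copies exist, and that the
 * iterator was created with NPY_ITER_RANGED):
 *
 *     npy_intp size = MpyIter_GetIterSize(iter);
 *     npy_intp start = (size * tid) / nthreads;
 *     npy_intp end = (size * (tid + 1)) / nthreads;
 *     if (MpyIter_ResetToIterIndexRange(iter, start, end, NULL) != NPY_SUCCEED)
 *         return NPY_FAIL;
 */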
/*NUMPY_API
* Sets the iterator to the specified multi-index, which must have the
* correct number of entries for 'ndim'. It is only valid
* when NPY_ITER_MULTI_INDEX was passed to the constructor. This operation
* fails if the multi-index is out of bounds.
*
* Returns NPY_SUCCEED on success, NPY_FAIL on failure.
*/
NPY_NO_EXPORT int
MpyIter_GotoMultiIndex(MpyIter *iter, npy_intp *multi_index)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
int nop = NIT_NOP(iter);
npy_intp iterindex, factor;
NpyIter_AxisData *axisdata;
npy_intp sizeof_axisdata;
npy_int8 *perm;
if (!(itflags&NPY_ITFLAG_HASMULTIINDEX)) {
PyErr_SetString(PyExc_ValueError,
"Cannot call GotoMultiIndex on an iterator without "
"requesting a multi-index in the constructor");
return NPY_FAIL;
}
if (itflags&NPY_ITFLAG_BUFFER) {
PyErr_SetString(PyExc_ValueError,
"Cannot call GotoMultiIndex on an iterator which "
"is buffered");
return NPY_FAIL;
}
if (itflags&NPY_ITFLAG_EXLOOP) {
PyErr_SetString(PyExc_ValueError,
"Cannot call GotoMultiIndex on an iterator which "
"has the flag EXTERNAL_LOOP");
return NPY_FAIL;
}
perm = NIT_PERM(iter);
axisdata = NIT_AXISDATA(iter);
sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
/* Compute the iterindex corresponding to the multi-index */
iterindex = 0;
factor = 1;
for (idim = 0; idim < ndim; ++idim) {
npy_int8 p = perm[idim];
npy_intp i, shape;
shape = NAD_SHAPE(axisdata);
if (p < 0) {
/* If the perm entry is negative, reverse the index */
i = shape - multi_index[ndim+p] - 1;
}
else {
i = multi_index[ndim-p-1];
}
/* Bounds-check this index */
if (i >= 0 && i < shape) {
iterindex += factor * i;
factor *= shape;
}
else {
PyErr_SetString(PyExc_IndexError,
"Iterator GotoMultiIndex called with an out-of-bounds "
"multi-index");
return NPY_FAIL;
}
NIT_ADVANCE_AXISDATA(axisdata, 1);
}
if (iterindex < NIT_ITERSTART(iter) || iterindex >= NIT_ITEREND(iter)) {
if (NIT_ITERSIZE(iter) < 0) {
PyErr_SetString(PyExc_ValueError, "iterator is too large");
return NPY_FAIL;
}
PyErr_SetString(PyExc_IndexError,
"Iterator GotoMultiIndex called with a multi-index outside the "
"restricted iteration range");
return NPY_FAIL;
}
mpyiter_goto_iterindex(iter, iterindex);
/* Update offload iter */
mpyiter_update_offiter(iter);
return NPY_SUCCEED;
}
/*NUMPY_API
* If the iterator is tracking an index, sets the iterator
* to the specified index.
*
* Returns NPY_SUCCEED on success, NPY_FAIL on failure.
*/
NPY_NO_EXPORT int
MpyIter_GotoIndex(MpyIter *iter, npy_intp flat_index)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
int nop = NIT_NOP(iter);
npy_intp iterindex, factor;
NpyIter_AxisData *axisdata;
npy_intp sizeof_axisdata;
if (!(itflags&NPY_ITFLAG_HASINDEX)) {
PyErr_SetString(PyExc_ValueError,
"Cannot call GotoIndex on an iterator without "
"requesting a C or Fortran index in the constructor");
return NPY_FAIL;
}
if (itflags&NPY_ITFLAG_BUFFER) {
PyErr_SetString(PyExc_ValueError,
"Cannot call GotoIndex on an iterator which "
"is buffered");
return NPY_FAIL;
}
if (itflags&NPY_ITFLAG_EXLOOP) {
PyErr_SetString(PyExc_ValueError,
"Cannot call GotoIndex on an iterator which "
"has the flag EXTERNAL_LOOP");
return NPY_FAIL;
}
if (flat_index < 0 || flat_index >= NIT_ITERSIZE(iter)) {
PyErr_SetString(PyExc_IndexError,
"Iterator GotoIndex called with an out-of-bounds "
"index");
return NPY_FAIL;
}
axisdata = NIT_AXISDATA(iter);
sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
/* Compute the iterindex corresponding to the flat_index */
iterindex = 0;
factor = 1;
for (idim = 0; idim < ndim; ++idim) {
npy_intp i, shape, iterstride;
iterstride = NAD_STRIDES(axisdata)[nop];
shape = NAD_SHAPE(axisdata);
/* Extract the index from the flat_index */
if (iterstride == 0) {
i = 0;
}
else if (iterstride < 0) {
i = shape - (flat_index/(-iterstride))%shape - 1;
}
else {
i = (flat_index/iterstride)%shape;
}
/* Add its contribution to iterindex */
iterindex += factor * i;
factor *= shape;
NIT_ADVANCE_AXISDATA(axisdata, 1);
}
if (iterindex < NIT_ITERSTART(iter) || iterindex >= NIT_ITEREND(iter)) {
PyErr_SetString(PyExc_IndexError,
"Iterator GotoIndex called with an index outside the "
"restricted iteration range.");
return NPY_FAIL;
}
mpyiter_goto_iterindex(iter, iterindex);
/* Update offload iter */
mpyiter_update_offiter(iter);
return NPY_SUCCEED;
}
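/*
 * Worked example of the decomposition above (a sketch): for C-order index
 * tracking over a (3, 4) shape the index strides are (4, 1), so
 * flat_index = 7 yields i = (7/1) % 4 = 3 on the inner axis and
 * i = (7/4) % 3 = 1 on the outer one, i.e. the multi-index (1, 3).
 */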
/*NUMPY_API
* Sets the iterator position to the specified iterindex,
* which matches the iteration order of the iterator.
*
* Returns NPY_SUCCEED on success, NPY_FAIL on failure.
*/
NPY_NO_EXPORT int
MpyIter_GotoIterIndex(MpyIter *iter, npy_intp iterindex)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
/*int ndim = NIT_NDIM(iter);*/
int iop, nop = NIT_NOP(iter);
if (itflags&NPY_ITFLAG_EXLOOP) {
PyErr_SetString(PyExc_ValueError,
"Cannot call GotoIterIndex on an iterator which "
"has the flag EXTERNAL_LOOP");
return NPY_FAIL;
}
if (iterindex < NIT_ITERSTART(iter) || iterindex >= NIT_ITEREND(iter)) {
if (NIT_ITERSIZE(iter) < 0) {
PyErr_SetString(PyExc_ValueError, "iterator is too large");
return NPY_FAIL;
}
PyErr_SetString(PyExc_IndexError,
"Iterator GotoIterIndex called with an iterindex outside the "
"iteration range.");
return NPY_FAIL;
}
if (itflags&NPY_ITFLAG_BUFFER) {
NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter);
npy_intp bufiterend, size;
size = NBF_SIZE(bufferdata);
bufiterend = NBF_BUFITEREND(bufferdata);
/* Check if the new iterindex is already within the buffer */
if (!(itflags&NPY_ITFLAG_REDUCE) && iterindex < bufiterend &&
iterindex >= bufiterend - size) {
npy_intp *strides, delta;
char **ptrs;
strides = NBF_STRIDES(bufferdata);
ptrs = NBF_PTRS(bufferdata);
delta = iterindex - NIT_ITERINDEX(iter);
for (iop = 0; iop < nop; ++iop) {
ptrs[iop] += delta * strides[iop];
}
NIT_ITERINDEX(iter) = iterindex;
}
/* Start the buffer at the provided iterindex */
else {
/* Write back to the arrays */
mpyiter_copy_from_buffers(iter);
mpyiter_goto_iterindex(iter, iterindex);
/* Prepare the next buffers and set iterend/size */
mpyiter_copy_to_buffers(iter, NULL);
}
}
else {
mpyiter_goto_iterindex(iter, iterindex);
}
/* Update offload iter */
mpyiter_update_offiter(iter);
return NPY_SUCCEED;
}
/*NUMPY_API
* Gets the current iteration index
*/
NPY_NO_EXPORT npy_intp
MpyIter_GetIterIndex(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
int nop = NIT_NOP(iter);
/* iterindex is only used if NPY_ITER_RANGED or NPY_ITER_BUFFERED was set */
if (itflags&(NPY_ITFLAG_RANGE|NPY_ITFLAG_BUFFER)) {
return NIT_ITERINDEX(iter);
}
else {
npy_intp iterindex;
NpyIter_AxisData *axisdata;
npy_intp sizeof_axisdata;
iterindex = 0;
if (ndim == 0) {
return 0;
}
sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
axisdata = NIT_INDEX_AXISDATA(NIT_AXISDATA(iter), ndim-1);
for (idim = ndim-2; idim >= 0; --idim) {
iterindex += NAD_INDEX(axisdata);
NIT_ADVANCE_AXISDATA(axisdata, -1);
iterindex *= NAD_SHAPE(axisdata);
}
iterindex += NAD_INDEX(axisdata);
return iterindex;
}
}
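/*
 * The loop above reconstructs the flat iterindex from the per-axis
 * indices with a Horner scheme, fastest axis first. A minimal worked
 * example: with axis shapes (fastest to slowest) {4, 3} and indices
 * {i0 = 2, i1 = 1}, the loop computes iterindex = i1, then
 * iterindex = i1*4 + i0 = 1*4 + 2 = 6.
 */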
/*NUMPY_API
* Whether the buffer allocation is being delayed
*/
NPY_NO_EXPORT npy_bool
MpyIter_HasDelayedBufAlloc(MpyIter *iter)
{
return (NIT_ITFLAGS(iter)&NPY_ITFLAG_DELAYBUF) != 0;
}
/*NUMPY_API
* Whether the iterator handles the inner loop
*/
NPY_NO_EXPORT npy_bool
MpyIter_HasExternalLoop(MpyIter *iter)
{
return (NIT_ITFLAGS(iter)&NPY_ITFLAG_EXLOOP) != 0;
}
/*NUMPY_API
* Whether the iterator is tracking a multi-index
*/
NPY_NO_EXPORT npy_bool
MpyIter_HasMultiIndex(MpyIter *iter)
{
return (NIT_ITFLAGS(iter)&NPY_ITFLAG_HASMULTIINDEX) != 0;
}
/*NUMPY_API
* Whether the iterator is tracking an index
*/
NPY_NO_EXPORT npy_bool
MpyIter_HasIndex(MpyIter *iter)
{
return (NIT_ITFLAGS(iter)&NPY_ITFLAG_HASINDEX) != 0;
}
/*NUMPY_API
* Checks whether the elements of the specified reduction operand
* which the iterator points at are being seen for the first time.
* The function returns a reasonable answer for reduction operands and
* when buffering is disabled. The answer may be incorrect for
* buffered non-reduction operands.
*
* This function is intended to be used in EXTERNAL_LOOP mode only,
* and will produce some wrong answers when that mode is not enabled.
*
* If this function returns true, the caller should also
* check the inner loop stride of the operand, because if
* that stride is 0, then only the first element of the innermost
* external loop is being visited for the first time.
*
* WARNING: For performance reasons, 'iop' is not bounds-checked,
* it is not confirmed that 'iop' is actually a reduction
* operand, and it is not confirmed that EXTERNAL_LOOP
* mode is enabled. These checks are the responsibility of
* the caller, and should be done outside of any inner loops.
*/
NPY_NO_EXPORT MPY_TARGET_MIC npy_bool
MpyIter_IsFirstVisit(MpyIter *iter, int iop)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
int nop = NIT_NOP(iter);
NpyIter_AxisData *axisdata;
npy_intp sizeof_axisdata;
sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
axisdata = NIT_AXISDATA(iter);
for (idim = 0; idim < ndim; ++idim) {
npy_intp coord = NAD_INDEX(axisdata);
npy_intp stride = NAD_STRIDES(axisdata)[iop];
/*
* If this is a reduction dimension and the coordinate
* is not at the start, it's definitely not the first visit
*/
if (stride == 0 && coord != 0) {
return 0;
}
NIT_ADVANCE_AXISDATA(axisdata, 1);
}
/*
* In reduction buffering mode, there's a double loop being
* tracked in the buffer part of the iterator data structure.
* We only need to check the outer level of this two-level loop,
* because of the requirement that EXTERNAL_LOOP be enabled.
*/
if (itflags&NPY_ITFLAG_BUFFER) {
NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter);
/* The outer reduce loop */
if (NBF_REDUCE_POS(bufferdata) != 0 &&
NBF_REDUCE_OUTERSTRIDES(bufferdata)[iop] == 0) {
return 0;
}
}
return 1;
}
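/*
 * Usage sketch (illustrative): per the warning above, the caller must
 * ensure EXTERNAL_LOOP mode is enabled and 'iop' is a reduction
 * operand before entering the loop.
 *
 *     if (MpyIter_IsFirstVisit(iter, iop)) {
 *         npy_intp *strides = MpyIter_GetInnerStrideArray(iter);
 *         if (strides[iop] == 0) {
 *             (only the first element of this inner loop is new)
 *         }
 *     }
 */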
/*NUMPY_API
* Whether buffering is required for the iteration (that is, whether
* it could not be done with no buffering).
*/
NPY_NO_EXPORT npy_bool
MpyIter_RequiresBuffering(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
/*int ndim = NIT_NDIM(iter);*/
int iop, nop = NIT_NOP(iter);
npyiter_opitflags *op_itflags;
if (!(itflags&NPY_ITFLAG_BUFFER)) {
return 0;
}
op_itflags = NIT_OPITFLAGS(iter);
/* If any operand requires a cast, buffering is mandatory */
for (iop = 0; iop < nop; ++iop) {
if (op_itflags[iop]&NPY_OP_ITFLAG_CAST) {
return 1;
}
}
return 0;
}
/*NUMPY_API
* Whether the iteration loop, and in particular the iternext()
* function, needs API access. If this is true, the GIL must
* be retained while iterating.
*/
NPY_NO_EXPORT npy_bool
MpyIter_IterationNeedsAPI(MpyIter *iter)
{
return (NIT_ITFLAGS(iter)&NPY_ITFLAG_NEEDSAPI) != 0;
}
/*NUMPY_API
* Gets the number of dimensions being iterated
*/
NPY_NO_EXPORT int
MpyIter_GetNDim(MpyIter *iter)
{
return NIT_NDIM(iter);
}
/*NUMPY_API
* Gets the number of operands being iterated
*/
NPY_NO_EXPORT int
MpyIter_GetNOp(MpyIter *iter)
{
return NIT_NOP(iter);
}
/*NUMPY_API
* Gets the number of elements being iterated
*/
NPY_NO_EXPORT npy_intp
MpyIter_GetIterSize(MpyIter *iter)
{
return NIT_ITERSIZE(iter);
}
/*NUMPY_API
* Gets the device on which elements reside
*/
NPY_NO_EXPORT int
MpyIter_GetDevice(MpyIter *iter)
{
return NIT_DEVICE(iter);
}
/*NUMPY_API
* Whether the iterator is buffered
*/
NPY_NO_EXPORT npy_bool
MpyIter_IsBuffered(MpyIter *iter)
{
return (NIT_ITFLAGS(iter)&NPY_ITFLAG_BUFFER) != 0;
}
/*NUMPY_API
* Whether the inner loop can grow if buffering is unneeded
*/
NPY_NO_EXPORT npy_bool
MpyIter_IsGrowInner(MpyIter *iter)
{
return (NIT_ITFLAGS(iter)&NPY_ITFLAG_GROWINNER) != 0;
}
/*NUMPY_API
* Gets the size of the buffer, or 0 if buffering is not enabled
*/
NPY_NO_EXPORT npy_intp
MpyIter_GetBufferSize(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
/*int ndim = NIT_NDIM(iter);*/
int nop = NIT_NOP(iter);
if (itflags&NPY_ITFLAG_BUFFER) {
NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter);
return NBF_BUFFERSIZE(bufferdata);
}
else {
return 0;
}
}
/*NUMPY_API
* Gets the range of iteration indices being iterated
*/
NPY_NO_EXPORT void
MpyIter_GetIterIndexRange(MpyIter *iter,
npy_intp *istart, npy_intp *iend)
{
*istart = NIT_ITERSTART(iter);
*iend = NIT_ITEREND(iter);
}
/*NUMPY_API
* Gets the broadcast shape if a multi-index is being tracked by the
* iterator; otherwise gets the shape of the iteration in Fortran order
* (fastest-changing index first).
*
* The reason Fortran-order is returned when a multi-index
* is not enabled is that this is providing a direct view into how
* the iterator traverses the n-dimensional space. The iterator organizes
* its memory from fastest index to slowest index, and when
* a multi-index is enabled, it uses a permutation to recover the original
* order.
*
* Returns NPY_SUCCEED or NPY_FAIL.
*/
NPY_NO_EXPORT int
MpyIter_GetShape(MpyIter *iter, npy_intp *outshape)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int ndim = NIT_NDIM(iter);
int nop = NIT_NOP(iter);
int idim;
npy_intp sizeof_axisdata;
NpyIter_AxisData *axisdata;
npy_int8 *perm;
axisdata = NIT_AXISDATA(iter);
sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
if (itflags&NPY_ITFLAG_HASMULTIINDEX) {
perm = NIT_PERM(iter);
for(idim = 0; idim < ndim; ++idim) {
npy_int8 p = perm[idim];
if (p < 0) {
outshape[ndim+p] = NAD_SHAPE(axisdata);
}
else {
outshape[ndim-p-1] = NAD_SHAPE(axisdata);
}
NIT_ADVANCE_AXISDATA(axisdata, 1);
}
}
else {
for(idim = 0; idim < ndim; ++idim) {
outshape[idim] = NAD_SHAPE(axisdata);
NIT_ADVANCE_AXISDATA(axisdata, 1);
}
}
return NPY_SUCCEED;
}
/*NUMPY_API
* Builds a set of strides which are the same as the strides of an
* output array created using the NPY_ITER_ALLOCATE flag, where NULL
* was passed for op_axes. This is for data packed contiguously,
* but not necessarily in C or Fortran order. This should be used
* together with NpyIter_GetShape and NpyIter_GetNDim.
*
* A use case for this function is to match the shape and layout of
* the iterator and tack on one or more dimensions. For example,
* in order to generate a vector per input value for a numerical gradient,
* you pass in ndim*itemsize for itemsize, then add another dimension to
* the end with size ndim and stride itemsize. To do the Hessian matrix,
* you do the same thing but add two dimensions, or take advantage of
* the symmetry and pack it into 1 dimension with a particular encoding.
*
* This function may only be called if the iterator is tracking a multi-index
* and if NPY_ITER_DONT_NEGATE_STRIDES was used to prevent an axis from
* being iterated in reverse order.
*
* If an array is created with this method, simply adding 'itemsize'
* for each iteration will traverse the new array matching the
* iterator.
*
* Returns NPY_SUCCEED or NPY_FAIL.
*/
NPY_NO_EXPORT int
MpyIter_CreateCompatibleStrides(MpyIter *iter,
npy_intp itemsize, npy_intp *outstrides)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
int nop = NIT_NOP(iter);
npy_intp sizeof_axisdata;
NpyIter_AxisData *axisdata;
npy_int8 *perm;
if (!(itflags&NPY_ITFLAG_HASMULTIINDEX)) {
PyErr_SetString(PyExc_RuntimeError,
"Iterator CreateCompatibleStrides may only be called "
"if a multi-index is being tracked");
return NPY_FAIL;
}
axisdata = NIT_AXISDATA(iter);
sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
perm = NIT_PERM(iter);
for(idim = 0; idim < ndim; ++idim) {
npy_int8 p = perm[idim];
if (p < 0) {
PyErr_SetString(PyExc_RuntimeError,
"Iterator CreateCompatibleStrides may only be called "
"if DONT_NEGATE_STRIDES was used to prevent reverse "
"iteration of an axis");
return NPY_FAIL;
}
else {
outstrides[ndim-p-1] = itemsize;
}
itemsize *= NAD_SHAPE(axisdata);
NIT_ADVANCE_AXISDATA(axisdata, 1);
}
return NPY_SUCCEED;
}
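/*
 * Sketch of the numerical-gradient use case described above
 * (illustrative only; assumes the iterator tracks a multi-index with
 * DONT_NEGATE_STRIDES, double elements, and ndim < NPY_MAXDIMS):
 *
 *     int ndim = MpyIter_GetNDim(iter);
 *     npy_intp shape[NPY_MAXDIMS], strides[NPY_MAXDIMS];
 *     if (MpyIter_GetShape(iter, shape) != NPY_SUCCEED ||
 *             MpyIter_CreateCompatibleStrides(iter,
 *                     ndim * (npy_intp)sizeof(double),
 *                     strides) != NPY_SUCCEED) {
 *         return NULL;
 *     }
 *     shape[ndim] = ndim;
 *     strides[ndim] = sizeof(double);
 *     (create an (ndim+1)-dimensional array from shape/strides)
 */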
/*NUMPY_API
* Get the array of data pointers (1 per object being iterated)
*
* This function may be safely called without holding the Python GIL.
*/
NPY_NO_EXPORT char **
MpyIter_GetDataPtrArray(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
/*int ndim = NIT_NDIM(iter);*/
int nop = NIT_NOP(iter);
if (itflags&NPY_ITFLAG_BUFFER) {
NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter);
return NBF_PTRS(bufferdata);
}
else {
NpyIter_AxisData *axisdata = NIT_AXISDATA(iter);
return NAD_PTRS(axisdata);
}
}
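/*
 * Typical use (sketch): the returned array belongs to the iterator and
 * its entries are updated in place as iteration advances, so it is
 * fetched once, before the loop. The 'iternext' function pointer below
 * is assumed to come from an accessor analogous to NumPy's
 * NpyIter_GetIterNext; that name is an assumption, not something this
 * file defines.
 *
 *     char **dataptr = MpyIter_GetDataPtrArray(iter);
 *     do {
 *         (process dataptr[0] .. dataptr[nop-1])
 *     } while (iternext(iter));
 */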
NPY_NO_EXPORT npy_intp *
MpyIter_GetOffDataPtrArray(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
/*int ndim = NIT_NDIM(iter);*/
int nop = NIT_NOP(iter);
MpyIter *offiter = (MpyIter *) NIT_OFFITER(iter);
if (itflags&NPY_ITFLAG_BUFFER) {
NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(offiter);
return (npy_intp *)NBF_PTRS(bufferdata);
}
else {
NpyIter_AxisData *axisdata = NIT_AXISDATA(offiter);
return (npy_intp *)NAD_PTRS(axisdata);
}
}
/*NUMPY_API
* Get the array of data pointers (1 per object being iterated),
* directly into the arrays (never pointing to a buffer), for starting
* unbuffered iteration. This always returns the addresses for the
* iterator position as reset to iterator index 0.
*
* These pointers are different from the pointers accepted by
* NpyIter_ResetBasePointers, because the direction along some
* axes may have been reversed, requiring base offsets.
*
* This function may be safely called without holding the Python GIL.
*/
NPY_NO_EXPORT char **
MpyIter_GetInitialDataPtrArray(MpyIter *iter)
{
/*npy_uint32 itflags = NIT_ITFLAGS(iter);*/
/*int ndim = NIT_NDIM(iter);*/
int nop = NIT_NOP(iter);
return NIT_RESETDATAPTR(iter);
}
NPY_NO_EXPORT npy_intp *
MpyIter_GetOffInitialDataPtrArray(MpyIter *iter)
{
/*npy_uint32 itflags = NIT_ITFLAGS(iter);*/
/*int ndim = NIT_NDIM(iter);*/
int nop = NIT_NOP(iter);
MpyIter *offiter = (MpyIter *) NIT_OFFITER(iter);
return (npy_intp *)NIT_RESETDATAPTR(offiter);
}
/*NUMPY_API
* Get the array of data type pointers (1 per object being iterated)
*/
NPY_NO_EXPORT PyArray_Descr **
MpyIter_GetDescrArray(MpyIter *iter)
{
/*npy_uint32 itflags = NIT_ITFLAGS(iter);*/
/*int ndim = NIT_NDIM(iter);*/
/*int nop = NIT_NOP(iter);*/
return NIT_DTYPES(iter);
}
/*NUMPY_API
* Get the array of objects being iterated
*/
NPY_NO_EXPORT PyMicArrayObject **
MpyIter_GetOperandArray(MpyIter *iter)
{
/*npy_uint32 itflags = NIT_ITFLAGS(iter);*/
/*int ndim = NIT_NDIM(iter);*/
int nop = NIT_NOP(iter);
return NIT_OPERANDS(iter);
}
/*NUMPY_API
* Returns a view to the i-th object with the iterator's internal axes
*/
NPY_NO_EXPORT PyMicArrayObject *
MpyIter_GetIterView(MpyIter *iter, npy_intp i)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
int nop = NIT_NOP(iter);
int device = NIT_DEVICE(iter);
npy_intp shape[NPY_MAXDIMS], strides[NPY_MAXDIMS];
PyMicArrayObject *obj, *view;
PyArray_Descr *dtype;
char *dataptr;
NpyIter_AxisData *axisdata;
npy_intp sizeof_axisdata;
int writeable;
if (i < 0) {
PyErr_SetString(PyExc_IndexError,
"index provided for an iterator view was out of bounds");
return NULL;
}
/* Don't provide views if buffering is enabled */
if (itflags&NPY_ITFLAG_BUFFER) {
PyErr_SetString(PyExc_ValueError,
"cannot provide an iterator view when buffering is enabled");
return NULL;
}
obj = NIT_OPERANDS(iter)[i];
dtype = PyMicArray_DESCR(obj);
writeable = NIT_OPITFLAGS(iter)[i]&NPY_OP_ITFLAG_WRITE;
dataptr = NIT_RESETDATAPTR(iter)[i];
axisdata = NIT_AXISDATA(iter);
sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
/* Retrieve the shape and strides from the axisdata */
for (idim = 0; idim < ndim; ++idim) {
shape[ndim-idim-1] = NAD_SHAPE(axisdata);
strides[ndim-idim-1] = NAD_STRIDES(axisdata)[i];
NIT_ADVANCE_AXISDATA(axisdata, 1);
}
Py_INCREF(dtype);
view = (PyMicArrayObject *)PyMicArray_NewFromDescr(device,
&PyMicArray_Type, dtype, ndim,
shape, strides, dataptr,
writeable ? NPY_ARRAY_WRITEABLE : 0,
NULL);
if (view == NULL) {
return NULL;
}
/* Tell the view who owns the data */
Py_INCREF(obj);
if (PyMicArray_SetBaseObject(view, (PyObject *)obj) < 0) {
Py_DECREF(view);
return NULL;
}
/* Make sure all the flags are good */
PyMicArray_UpdateFlags(view, NPY_ARRAY_UPDATE_ALL);
return view;
}
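/*
 * Usage sketch (illustrative): taking a view of operand 0. The view
 * holds a reference to the operand through its base object, so only
 * the view needs to be released.
 *
 *     PyMicArrayObject *view = MpyIter_GetIterView(iter, 0);
 *     if (view == NULL) {
 *         return NULL;
 *     }
 *     (use the view)
 *     Py_DECREF(view);
 */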
/*NUMPY_API
* Get a pointer to the index, if it is being tracked
*/
NPY_NO_EXPORT npy_intp *
MpyIter_GetIndexPtr(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
/*int ndim = NIT_NDIM(iter);*/
int nop = NIT_NOP(iter);
NpyIter_AxisData *axisdata = NIT_AXISDATA(iter);
if (itflags&NPY_ITFLAG_HASINDEX) {
/* The index is just after the data pointers */
return (npy_intp*)NAD_PTRS(axisdata) + nop;
}
else {
return NULL;
}
}
NPY_NO_EXPORT npy_intp *
MpyIter_GetOffIndexPtr(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
/*int ndim = NIT_NDIM(iter);*/
int nop = NIT_NOP(iter);
MpyIter *offiter = (MpyIter *) NIT_OFFITER(iter);
NpyIter_AxisData *axisdata = NIT_AXISDATA(offiter);
if (itflags&NPY_ITFLAG_HASINDEX) {
/* The index is just after the data pointers */
return (npy_intp*)NAD_PTRS(axisdata) + nop;
}
else {
return NULL;
}
}
/*NUMPY_API
* Gets an array of read flags (1 per object being iterated)
*/
NPY_NO_EXPORT void
MpyIter_GetReadFlags(MpyIter *iter, char *outreadflags)
{
/*npy_uint32 itflags = NIT_ITFLAGS(iter);*/
/*int ndim = NIT_NDIM(iter);*/
int iop, nop = NIT_NOP(iter);
npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter);
for (iop = 0; iop < nop; ++iop) {
outreadflags[iop] = (op_itflags[iop]&NPY_OP_ITFLAG_READ) != 0;
}
}
/*NUMPY_API
* Gets an array of write flags (1 per object being iterated)
*/
NPY_NO_EXPORT void
MpyIter_GetWriteFlags(MpyIter *iter, char *outwriteflags)
{
/*npy_uint32 itflags = NIT_ITFLAGS(iter);*/
/*int ndim = NIT_NDIM(iter);*/
int iop, nop = NIT_NOP(iter);
npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter);
for (iop = 0; iop < nop; ++iop) {
outwriteflags[iop] = (op_itflags[iop]&NPY_OP_ITFLAG_WRITE) != 0;
}
}
/*NUMPY_API
* Get the array of strides for the inner loop (when HasExternalLoop is true)
*
* This function may be safely called without holding the Python GIL.
*/
NPY_NO_EXPORT npy_intp *
MpyIter_GetInnerStrideArray(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
/*int ndim = NIT_NDIM(iter);*/
int nop = NIT_NOP(iter);
if (itflags&NPY_ITFLAG_BUFFER) {
NpyIter_BufferData *data = NIT_BUFFERDATA(iter);
return NBF_STRIDES(data);
}
else {
NpyIter_AxisData *axisdata = NIT_AXISDATA(iter);
return NAD_STRIDES(axisdata);
}
}
NPY_NO_EXPORT npy_intp *
MpyIter_GetOffInnerStrideArray(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
/*int ndim = NIT_NDIM(iter);*/
int nop = NIT_NOP(iter);
MpyIter *offiter = (MpyIter *) NIT_OFFITER(iter);
if (itflags&NPY_ITFLAG_BUFFER) {
NpyIter_BufferData *data = NIT_BUFFERDATA(offiter);
return NBF_STRIDES(data);
}
else {
NpyIter_AxisData *axisdata = NIT_AXISDATA(offiter);
return NAD_STRIDES(axisdata);
}
}
static npy_intp *
getAxisStrideArray(MpyIter *initer, int axis, int isoffload)
{
MpyIter *iter = isoffload ? (MpyIter *) NIT_OFFITER(initer) : initer;
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
int nop = NIT_NOP(iter);
npy_int8 *perm = NIT_PERM(iter);
NpyIter_AxisData *axisdata = NIT_AXISDATA(iter);
npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
if (axis < 0 || axis >= ndim) {
PyErr_SetString(PyExc_ValueError,
"axis out of bounds in iterator GetStrideAxisArray");
return NULL;
}
if (itflags&NPY_ITFLAG_HASMULTIINDEX) {
/* Reverse axis, since the iterator treats them that way */
axis = ndim-1-axis;
/* First find the axis in question */
for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) {
if (perm[idim] == axis || -1 - perm[idim] == axis) {
return NAD_STRIDES(axisdata);
}
}
}
else {
return NAD_STRIDES(NIT_INDEX_AXISDATA(axisdata, axis));
}
PyErr_SetString(PyExc_RuntimeError,
"internal error in iterator perm");
return NULL;
}
/*NUMPY_API
* Gets the array of strides for the specified axis.
* If the iterator is tracking a multi-index, gets the strides
* for the axis specified, otherwise gets the strides for
* the iteration axis in Fortran order (fastest-changing axis first).
*
* Returns NULL if an error occurs.
*/
NPY_NO_EXPORT npy_intp *
MpyIter_GetAxisStrideArray(MpyIter *iter, int axis)
{
return getAxisStrideArray(iter, axis, 0);
}
NPY_NO_EXPORT npy_intp *
MpyIter_GetOffAxisStrideArray(MpyIter *iter, int axis)
{
return getAxisStrideArray(iter, axis, 1);
}
/*NUMPY_API
* Get an array of strides which are fixed. Any strides which may
* change during iteration receive the value NPY_MAX_INTP. Once
* the iterator is ready to iterate, call this to get the strides
* which will always be fixed in the inner loop, then choose optimized
* inner loop functions which take advantage of those fixed strides.
*
* This function may be safely called without holding the Python GIL.
*/
NPY_NO_EXPORT void
MpyIter_GetInnerFixedStrideArray(MpyIter *iter, npy_intp *out_strides)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int ndim = NIT_NDIM(iter);
int iop, nop = NIT_NOP(iter);
NpyIter_AxisData *axisdata0 = NIT_AXISDATA(iter);
npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
if (itflags&NPY_ITFLAG_BUFFER) {
NpyIter_BufferData *data = NIT_BUFFERDATA(iter);
npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter);
npy_intp stride, *strides = NBF_STRIDES(data),
*ad_strides = NAD_STRIDES(axisdata0);
PyArray_Descr **dtypes = NIT_DTYPES(iter);
for (iop = 0; iop < nop; ++iop) {
stride = strides[iop];
/*
* Operands which are always/never buffered have fixed strides,
* and everything has fixed strides when ndim is 0 or 1
*/
if (ndim <= 1 || (op_itflags[iop]&
(NPY_OP_ITFLAG_CAST|NPY_OP_ITFLAG_BUFNEVER))) {
out_strides[iop] = stride;
}
/* If it's a reduction, 0-stride inner loop may have fixed stride */
else if (stride == 0 && (itflags&NPY_ITFLAG_REDUCE)) {
/* If it's a reduction operand, definitely fixed stride */
if (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE) {
out_strides[iop] = stride;
}
/*
* Otherwise it's guaranteed to be a fixed stride if the
* stride is 0 for all the dimensions.
*/
else {
NpyIter_AxisData *axisdata = axisdata0;
int idim;
for (idim = 0; idim < ndim; ++idim) {
if (NAD_STRIDES(axisdata)[iop] != 0) {
break;
}
NIT_ADVANCE_AXISDATA(axisdata, 1);
}
/* If all the strides were 0, the stride won't change */
if (idim == ndim) {
out_strides[iop] = stride;
}
else {
out_strides[iop] = NPY_MAX_INTP;
}
}
}
/*
* If the operand is contiguous in the inner loop, its stride won't
* change when switching between buffering and not buffering
*/
else if (ad_strides[iop] == dtypes[iop]->elsize) {
out_strides[iop] = ad_strides[iop];
}
/*
* Otherwise the strides can change if the operand is sometimes
* buffered, sometimes not.
*/
else {
out_strides[iop] = NPY_MAX_INTP;
}
}
}
else {
/* If there's no buffering, the strides are always fixed */
memcpy(out_strides, NAD_STRIDES(axisdata0), nop*NPY_SIZEOF_INTP);
}
}
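/*
 * Usage sketch (illustrative): choosing a specialized inner loop from
 * the fixed strides. The two loop functions named here are
 * hypothetical placeholders, not part of this file.
 *
 *     npy_intp fixed[NPY_MAXARGS];
 *     MpyIter_GetInnerFixedStrideArray(iter, fixed);
 *     if (fixed[0] == sizeof(double) && fixed[1] == sizeof(double)) {
 *         loop = contiguous_double_loop;
 *     }
 *     else {
 *         loop = generic_strided_loop;
 *     }
 */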
/*NUMPY_API
* Get a pointer to the size of the inner loop (when HasExternalLoop is true)
*
* This function may be safely called without holding the Python GIL.
*/
NPY_NO_EXPORT npy_intp *
MpyIter_GetInnerLoopSizePtr(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
/*int ndim = NIT_NDIM(iter);*/
int nop = NIT_NOP(iter);
if (itflags&NPY_ITFLAG_BUFFER) {
NpyIter_BufferData *data = NIT_BUFFERDATA(iter);
return &NBF_SIZE(data);
}
else {
NpyIter_AxisData *axisdata = NIT_AXISDATA(iter);
return &NAD_SHAPE(axisdata);
}
}
NPY_NO_EXPORT npy_intp *
MpyIter_GetOffInnerLoopSizePtr(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
/*int ndim = NIT_NDIM(iter);*/
int nop = NIT_NOP(iter);
MpyIter *offiter = (MpyIter *) NIT_OFFITER(iter);
if (itflags&NPY_ITFLAG_BUFFER) {
NpyIter_BufferData *data = NIT_BUFFERDATA(offiter);
return &NBF_SIZE(data);
}
else {
NpyIter_AxisData *axisdata = NIT_AXISDATA(offiter);
return &NAD_SHAPE(axisdata);
}
}
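/*
 * Usage sketch for EXTERNAL_LOOP mode (illustrative): the size pointer
 * is dereferenced on every pass, because GROWINNER and buffering can
 * change the inner loop size between passes. As in the earlier
 * GetDataPtrArray sketch, 'iternext' is an assumed accessor.
 *
 *     char **dataptr = MpyIter_GetDataPtrArray(iter);
 *     npy_intp *strides = MpyIter_GetInnerStrideArray(iter);
 *     npy_intp *sizeptr = MpyIter_GetInnerLoopSizePtr(iter);
 *     do {
 *         npy_intp i, n = *sizeptr;
 *         char *p = dataptr[0];
 *         for (i = 0; i < n; ++i, p += strides[0]) {
 *             (process one element at p)
 *         }
 *     } while (iternext(iter));
 */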
/*NUMPY_API
* For debugging
*/
NPY_NO_EXPORT void
MpyIter_DebugPrint(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
int iop, nop = NIT_NOP(iter);
NpyIter_AxisData *axisdata;
npy_intp sizeof_axisdata;
NPY_ALLOW_C_API_DEF
NPY_ALLOW_C_API
printf("\n------ BEGIN ITERATOR DUMP ------\n");
printf("| Iterator Address: %p\n", (void *)iter);
printf("| ItFlags: ");
if (itflags&NPY_ITFLAG_IDENTPERM)
printf("IDENTPERM ");
if (itflags&NPY_ITFLAG_NEGPERM)
printf("NEGPERM ");
if (itflags&NPY_ITFLAG_HASINDEX)
printf("HASINDEX ");
if (itflags&NPY_ITFLAG_HASMULTIINDEX)
printf("HASMULTIINDEX ");
if (itflags&NPY_ITFLAG_FORCEDORDER)
printf("FORCEDORDER ");
if (itflags&NPY_ITFLAG_EXLOOP)
printf("EXLOOP ");
if (itflags&NPY_ITFLAG_RANGE)
printf("RANGE ");
if (itflags&NPY_ITFLAG_BUFFER)
printf("BUFFER ");
if (itflags&NPY_ITFLAG_GROWINNER)
printf("GROWINNER ");
if (itflags&NPY_ITFLAG_ONEITERATION)
printf("ONEITERATION ");
if (itflags&NPY_ITFLAG_DELAYBUF)
printf("DELAYBUF ");
if (itflags&NPY_ITFLAG_NEEDSAPI)
printf("NEEDSAPI ");
if (itflags&NPY_ITFLAG_REDUCE)
printf("REDUCE ");
if (itflags&NPY_ITFLAG_REUSE_REDUCE_LOOPS)
printf("REUSE_REDUCE_LOOPS ");
printf("\n");
printf("| NDim: %d\n", (int)ndim);
printf("| NOp: %d\n", (int)nop);
if (NIT_MASKOP(iter) >= 0) {
printf("| MaskOp: %d\n", (int)NIT_MASKOP(iter));
}
printf("| IterSize: %d\n", (int)NIT_ITERSIZE(iter));
printf("| IterStart: %d\n", (int)NIT_ITERSTART(iter));
printf("| IterEnd: %d\n", (int)NIT_ITEREND(iter));
printf("| IterIndex: %d\n", (int)NIT_ITERINDEX(iter));
printf("| Iterator SizeOf: %d\n",
(int)NIT_SIZEOF_ITERATOR(itflags, ndim, nop));
printf("| BufferData SizeOf: %d\n",
(int)NIT_BUFFERDATA_SIZEOF(itflags, ndim, nop));
printf("| AxisData SizeOf: %d\n",
(int)NIT_AXISDATA_SIZEOF(itflags, ndim, nop));
printf("|\n");
printf("| Perm: ");
for (idim = 0; idim < ndim; ++idim) {
printf("%d ", (int)NIT_PERM(iter)[idim]);
}
printf("\n");
printf("| DTypes: ");
for (iop = 0; iop < nop; ++iop) {
printf("%p ", (void *)NIT_DTYPES(iter)[iop]);
}
printf("\n");
printf("| DTypes: ");
for (iop = 0; iop < nop; ++iop) {
if (NIT_DTYPES(iter)[iop] != NULL)
PyObject_Print((PyObject*)NIT_DTYPES(iter)[iop], stdout, 0);
else
printf("(nil) ");
printf(" ");
}
printf("\n");
printf("| InitDataPtrs: ");
for (iop = 0; iop < nop; ++iop) {
printf("%p ", (void *)NIT_RESETDATAPTR(iter)[iop]);
}
printf("\n");
printf("| BaseOffsets: ");
for (iop = 0; iop < nop; ++iop) {
printf("%i ", (int)NIT_BASEOFFSETS(iter)[iop]);
}
printf("\n");
if (itflags&NPY_ITFLAG_HASINDEX) {
printf("| InitIndex: %d\n",
(int)(npy_intp)NIT_RESETDATAPTR(iter)[nop]);
}
printf("| Operands: ");
for (iop = 0; iop < nop; ++iop) {
printf("%p ", (void *)NIT_OPERANDS(iter)[iop]);
}
printf("\n");
printf("| Operand DTypes: ");
for (iop = 0; iop < nop; ++iop) {
PyArray_Descr *dtype;
if (NIT_OPERANDS(iter)[iop] != NULL) {
dtype = PyMicArray_DESCR(NIT_OPERANDS(iter)[iop]);
if (dtype != NULL)
PyObject_Print((PyObject *)dtype, stdout, 0);
else
printf("(nil) ");
}
else {
printf("(op nil) ");
}
printf(" ");
}
printf("\n");
printf("| OpItFlags:\n");
for (iop = 0; iop < nop; ++iop) {
printf("| Flags[%d]: ", (int)iop);
if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_READ)
printf("READ ");
if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_WRITE)
printf("WRITE ");
if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_CAST)
printf("CAST ");
if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_BUFNEVER)
printf("BUFNEVER ");
if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_ALIGNED)
printf("ALIGNED ");
if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_REDUCE)
printf("REDUCE ");
if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_VIRTUAL)
printf("VIRTUAL ");
if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_WRITEMASKED)
printf("WRITEMASKED ");
printf("\n");
}
printf("|\n");
if (itflags&NPY_ITFLAG_BUFFER) {
NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter);
printf("| BufferData:\n");
printf("| BufferSize: %d\n", (int)NBF_BUFFERSIZE(bufferdata));
printf("| Size: %d\n", (int)NBF_SIZE(bufferdata));
printf("| BufIterEnd: %d\n", (int)NBF_BUFITEREND(bufferdata));
if (itflags&NPY_ITFLAG_REDUCE) {
printf("| REDUCE Pos: %d\n",
(int)NBF_REDUCE_POS(bufferdata));
printf("| REDUCE OuterSize: %d\n",
(int)NBF_REDUCE_OUTERSIZE(bufferdata));
printf("| REDUCE OuterDim: %d\n",
(int)NBF_REDUCE_OUTERDIM(bufferdata));
}
printf("| Strides: ");
for (iop = 0; iop < nop; ++iop)
printf("%d ", (int)NBF_STRIDES(bufferdata)[iop]);
printf("\n");
/* Print the fixed strides when there's no inner loop */
if (itflags&NPY_ITFLAG_EXLOOP) {
npy_intp fixedstrides[NPY_MAXDIMS];
printf("| Fixed Strides: ");
MpyIter_GetInnerFixedStrideArray(iter, fixedstrides);
for (iop = 0; iop < nop; ++iop)
printf("%d ", (int)fixedstrides[iop]);
printf("\n");
}
printf("| Ptrs: ");
for (iop = 0; iop < nop; ++iop)
printf("%p ", (void *)NBF_PTRS(bufferdata)[iop]);
printf("\n");
if (itflags&NPY_ITFLAG_REDUCE) {
printf("| REDUCE Outer Strides: ");
for (iop = 0; iop < nop; ++iop)
printf("%d ", (int)NBF_REDUCE_OUTERSTRIDES(bufferdata)[iop]);
printf("\n");
printf("| REDUCE Outer Ptrs: ");
for (iop = 0; iop < nop; ++iop)
printf("%p ", (void *)NBF_REDUCE_OUTERPTRS(bufferdata)[iop]);
printf("\n");
}
printf("| ReadTransferFn: ");
for (iop = 0; iop < nop; ++iop)
printf("%p ", (void *)NBF_READTRANSFERFN(bufferdata)[iop]);
printf("\n");
printf("| ReadTransferData: ");
for (iop = 0; iop < nop; ++iop)
printf("%p ", (void *)NBF_READTRANSFERDATA(bufferdata)[iop]);
printf("\n");
printf("| WriteTransferFn: ");
for (iop = 0; iop < nop; ++iop)
printf("%p ", (void *)NBF_WRITETRANSFERFN(bufferdata)[iop]);
printf("\n");
printf("| WriteTransferData: ");
for (iop = 0; iop < nop; ++iop)
printf("%p ", (void *)NBF_WRITETRANSFERDATA(bufferdata)[iop]);
printf("\n");
printf("| Buffers: ");
for (iop = 0; iop < nop; ++iop)
printf("%p ", (void *)NBF_BUFFERS(bufferdata)[iop]);
printf("\n");
printf("|\n");
}
axisdata = NIT_AXISDATA(iter);
sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) {
printf("| AxisData[%d]:\n", (int)idim);
printf("| Shape: %d\n", (int)NAD_SHAPE(axisdata));
printf("| Index: %d\n", (int)NAD_INDEX(axisdata));
printf("| Strides: ");
for (iop = 0; iop < nop; ++iop) {
printf("%d ", (int)NAD_STRIDES(axisdata)[iop]);
}
printf("\n");
if (itflags&NPY_ITFLAG_HASINDEX) {
printf("| Index Stride: %d\n", (int)NAD_STRIDES(axisdata)[nop]);
}
printf("| Ptrs: ");
for (iop = 0; iop < nop; ++iop) {
printf("%p ", (void *)NAD_PTRS(axisdata)[iop]);
}
printf("\n");
if (itflags&NPY_ITFLAG_HASINDEX) {
printf("| Index Value: %d\n",
(int)((npy_intp*)NAD_PTRS(axisdata))[nop]);
}
}
printf("------- END ITERATOR DUMP -------\n");
fflush(stdout);
NPY_DISABLE_C_API
}
NPY_NO_EXPORT void
mpyiter_coalesce_axes(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
int nop = NIT_NOP(iter);
npy_intp istrides, nstrides = NAD_NSTRIDES();
NpyIter_AxisData *axisdata = NIT_AXISDATA(iter);
npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
NpyIter_AxisData *ad_compress;
npy_intp new_ndim = 1;
/* The HASMULTIINDEX or IDENTPERM flags do not apply after coalescing */
NIT_ITFLAGS(iter) &= ~(NPY_ITFLAG_IDENTPERM|NPY_ITFLAG_HASMULTIINDEX);
axisdata = NIT_AXISDATA(iter);
ad_compress = axisdata;
for (idim = 0; idim < ndim-1; ++idim) {
int can_coalesce = 1;
npy_intp shape0 = NAD_SHAPE(ad_compress);
npy_intp shape1 = NAD_SHAPE(NIT_INDEX_AXISDATA(axisdata, 1));
npy_intp *strides0 = NAD_STRIDES(ad_compress);
npy_intp *strides1 = NAD_STRIDES(NIT_INDEX_AXISDATA(axisdata, 1));
/* Check that all the axes can be coalesced */
for (istrides = 0; istrides < nstrides; ++istrides) {
if (!((shape0 == 1 && strides0[istrides] == 0) ||
(shape1 == 1 && strides1[istrides] == 0)) &&
(strides0[istrides]*shape0 != strides1[istrides])) {
can_coalesce = 0;
break;
}
}
if (can_coalesce) {
npy_intp *strides = NAD_STRIDES(ad_compress);
NIT_ADVANCE_AXISDATA(axisdata, 1);
NAD_SHAPE(ad_compress) *= NAD_SHAPE(axisdata);
for (istrides = 0; istrides < nstrides; ++istrides) {
if (strides[istrides] == 0) {
strides[istrides] = NAD_STRIDES(axisdata)[istrides];
}
}
}
else {
NIT_ADVANCE_AXISDATA(axisdata, 1);
NIT_ADVANCE_AXISDATA(ad_compress, 1);
if (ad_compress != axisdata) {
memcpy(ad_compress, axisdata, sizeof_axisdata);
}
++new_ndim;
}
}
/*
* If the number of axes shrunk, reset the perm and
* compress the data into the new layout.
*/
if (new_ndim < ndim) {
npy_int8 *perm = NIT_PERM(iter);
/* Reset to an identity perm */
for (idim = 0; idim < new_ndim; ++idim) {
perm[idim] = (npy_int8)idim;
}
NIT_NDIM(iter) = new_ndim;
}
}
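/*
 * A minimal worked example of the coalescing test above: a C-contiguous
 * int32 operand with shape (3, 4), iterated fastest axis first, has
 * axis shapes {4, 3} and strides {4, 16}. Since strides0*shape0 ==
 * strides1 (4*4 == 16) for every stride, the two axes fuse into a
 * single axis of shape 12 with stride 4.
 */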
/*
* If errmsg is non-NULL, it should point to a variable which will
* receive the error message, and no Python exception will be set.
* This is so that the function can be called from code not holding
* the GIL.
*/
NPY_NO_EXPORT int
mpyiter_allocate_buffers(MpyIter *iter, char **errmsg)
{
/*npy_uint32 itflags = NIT_ITFLAGS(iter);*/
/*int ndim = NIT_NDIM(iter);*/
int iop = 0, nop = NIT_NOP(iter);
int device = NIT_DEVICE(iter);
npy_intp i;
npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter);
NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter);
PyArray_Descr **op_dtype = NIT_DTYPES(iter);
npy_intp buffersize = NBF_BUFFERSIZE(bufferdata);
char *buffer, **buffers = NBF_BUFFERS(bufferdata);
for (iop = 0; iop < nop; ++iop) {
npyiter_opitflags flags = op_itflags[iop];
/*
* If we have determined that a buffer may be needed,
* allocate one.
*/
if (!(flags&NPY_OP_ITFLAG_BUFNEVER)) {
npy_intp itemsize = op_dtype[iop]->elsize;
buffer = target_malloc(itemsize*buffersize, device);
if (buffer == NULL) {
if (errmsg == NULL) {
PyErr_NoMemory();
}
else {
*errmsg = "out of memory";
}
goto fail;
}
buffers[iop] = buffer;
}
}
return 1;
fail:
for (i = 0; i < iop; ++i) {
if (buffers[i] != NULL) {
target_free(buffers[i], device);
buffers[i] = NULL;
}
}
return 0;
}
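/*
 * Usage sketch (illustrative): the errmsg parameter allows this to be
 * called without holding the GIL; the caller re-acquires the GIL
 * before turning the message into a Python exception.
 *
 *     char *errmsg = NULL;
 *     if (!mpyiter_allocate_buffers(iter, &errmsg)) {
 *         NPY_ALLOW_C_API_DEF
 *         NPY_ALLOW_C_API
 *         PyErr_SetString(PyExc_RuntimeError, errmsg);
 *         NPY_DISABLE_C_API
 *         return 0;
 *     }
 */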
/*
* This sets the AXISDATA portion of the iterator to the specified
* iterindex, updating the pointers as well. This function does
* no error checking.
*/
NPY_NO_EXPORT void
mpyiter_goto_iterindex(MpyIter *iter, npy_intp iterindex)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
int nop = NIT_NOP(iter);
char **dataptr;
NpyIter_AxisData *axisdata;
npy_intp sizeof_axisdata;
npy_intp istrides, nstrides, i, shape;
axisdata = NIT_AXISDATA(iter);
sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
nstrides = NAD_NSTRIDES();
NIT_ITERINDEX(iter) = iterindex;
ndim = ndim ? ndim : 1;
if (iterindex == 0) {
dataptr = NIT_RESETDATAPTR(iter);
for (idim = 0; idim < ndim; ++idim) {
char **ptrs;
NAD_INDEX(axisdata) = 0;
ptrs = NAD_PTRS(axisdata);
for (istrides = 0; istrides < nstrides; ++istrides) {
ptrs[istrides] = dataptr[istrides];
}
NIT_ADVANCE_AXISDATA(axisdata, 1);
}
}
else {
/*
* Set the multi-index, from the fastest-changing to the
* slowest-changing.
*/
axisdata = NIT_AXISDATA(iter);
shape = NAD_SHAPE(axisdata);
i = iterindex;
iterindex /= shape;
NAD_INDEX(axisdata) = i - iterindex * shape;
for (idim = 0; idim < ndim-1; ++idim) {
NIT_ADVANCE_AXISDATA(axisdata, 1);
shape = NAD_SHAPE(axisdata);
i = iterindex;
iterindex /= shape;
NAD_INDEX(axisdata) = i - iterindex * shape;
}
dataptr = NIT_RESETDATAPTR(iter);
/*
* Accumulate the successive pointers with their
* offsets in the opposite order, starting from the
* original data pointers.
*/
for (idim = 0; idim < ndim; ++idim) {
npy_intp *strides;
char **ptrs;
strides = NAD_STRIDES(axisdata);
ptrs = NAD_PTRS(axisdata);
i = NAD_INDEX(axisdata);
for (istrides = 0; istrides < nstrides; ++istrides) {
ptrs[istrides] = dataptr[istrides] + i*strides[istrides];
}
dataptr = ptrs;
NIT_ADVANCE_AXISDATA(axisdata, -1);
}
}
}
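/*
 * Worked example of the decomposition above: with axis shapes (fastest
 * to slowest) {4, 3} and iterindex 6, the first step computes
 * NAD_INDEX = 6 - (6/4)*4 = 2 for the fastest axis and leaves
 * iterindex = 1 for the next axis. This is the inverse of the Horner
 * accumulation in MpyIter_GetIterIndex.
 */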
/*
* This gets called after the buffers have been exhausted, and
* their data needs to be written back to the arrays. The multi-index
* must be positioned for the beginning of the buffer.
*/
NPY_NO_EXPORT void
mpyiter_copy_from_buffers(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int ndim = NIT_NDIM(iter);
int iop, nop = NIT_NOP(iter);
int maskop = NIT_MASKOP(iter);
npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter);
NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter);
NpyIter_AxisData *axisdata = NIT_AXISDATA(iter),
*reduce_outeraxisdata = NULL;
PyArray_Descr **dtypes = NIT_DTYPES(iter);
npy_intp transfersize = NBF_SIZE(bufferdata);
npy_intp *strides = NBF_STRIDES(bufferdata),
*ad_strides = NAD_STRIDES(axisdata);
npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
int device = NIT_DEVICE(iter);
char **ad_ptrs = NAD_PTRS(axisdata);
char **buffers = NBF_BUFFERS(bufferdata);
char *buffer;
npy_intp reduce_outerdim = 0;
npy_intp *reduce_outerstrides = NULL;
PyMicArray_StridedUnaryOp *stransfer = NULL;
NpyAuxData *transferdata = NULL;
npy_intp axisdata_incr = NIT_AXISDATA_SIZEOF(itflags, ndim, nop) /
NPY_SIZEOF_INTP;
/* If we're past the end, nothing to copy */
if (NBF_SIZE(bufferdata) == 0) {
return;
}
NPY_IT_DBG_PRINT("Iterator: Copying buffers to outputs\n");
if (itflags&NPY_ITFLAG_REDUCE) {
reduce_outerdim = NBF_REDUCE_OUTERDIM(bufferdata);
reduce_outerstrides = NBF_REDUCE_OUTERSTRIDES(bufferdata);
reduce_outeraxisdata = NIT_INDEX_AXISDATA(axisdata, reduce_outerdim);
transfersize *= NBF_REDUCE_OUTERSIZE(bufferdata);
}
for (iop = 0; iop < nop; ++iop) {
stransfer = NBF_WRITETRANSFERFN(bufferdata)[iop];
transferdata = NBF_WRITETRANSFERDATA(bufferdata)[iop];
buffer = buffers[iop];
/*
* Copy the data back to the arrays. If the type has refs,
* this function moves them so the buffer's refs are released.
*
* The flag USINGBUFFER is set when the buffer was used, so
* only copy back when this flag is on.
*/
if ((stransfer != NULL) &&
(op_itflags[iop]&(NPY_OP_ITFLAG_WRITE|NPY_OP_ITFLAG_USINGBUFFER))
== (NPY_OP_ITFLAG_WRITE|NPY_OP_ITFLAG_USINGBUFFER)) {
npy_intp op_transfersize;
npy_intp src_stride, *dst_strides, *dst_coords, *dst_shape;
int ndim_transfer;
NPY_IT_DBG_PRINT1("Iterator: Operand %d was buffered\n",
(int)iop);
/*
* If this operand is being reduced in the inner loop,
* its buffering stride was set to zero, and just
* one element was copied.
*/
if (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE) {
if (strides[iop] == 0) {
if (reduce_outerstrides[iop] == 0) {
op_transfersize = 1;
src_stride = 0;
dst_strides = &src_stride;
dst_coords = &NAD_INDEX(reduce_outeraxisdata);
dst_shape = &NAD_SHAPE(reduce_outeraxisdata);
ndim_transfer = 1;
}
else {
op_transfersize = NBF_REDUCE_OUTERSIZE(bufferdata);
src_stride = reduce_outerstrides[iop];
dst_strides =
&NAD_STRIDES(reduce_outeraxisdata)[iop];
dst_coords = &NAD_INDEX(reduce_outeraxisdata);
dst_shape = &NAD_SHAPE(reduce_outeraxisdata);
ndim_transfer = ndim - reduce_outerdim;
}
}
else {
if (reduce_outerstrides[iop] == 0) {
op_transfersize = NBF_SIZE(bufferdata);
src_stride = strides[iop];
dst_strides = &ad_strides[iop];
dst_coords = &NAD_INDEX(axisdata);
dst_shape = &NAD_SHAPE(axisdata);
ndim_transfer = reduce_outerdim ?
reduce_outerdim : 1;
}
else {
op_transfersize = transfersize;
src_stride = strides[iop];
dst_strides = &ad_strides[iop];
dst_coords = &NAD_INDEX(axisdata);
dst_shape = &NAD_SHAPE(axisdata);
ndim_transfer = ndim;
}
}
}
else {
op_transfersize = transfersize;
src_stride = strides[iop];
dst_strides = &ad_strides[iop];
dst_coords = &NAD_INDEX(axisdata);
dst_shape = &NAD_SHAPE(axisdata);
ndim_transfer = ndim;
}
NPY_IT_DBG_PRINT2("Iterator: Copying buffer to "
"operand %d (%d items)\n",
(int)iop, (int)op_transfersize);
/* WRITEMASKED operand */
if (op_itflags[iop] & NPY_OP_ITFLAG_WRITEMASKED) {
npy_bool *maskptr;
/*
* The mask pointer may be in the buffer or in
* the array, detect which one.
*/
if ((op_itflags[maskop]&NPY_OP_ITFLAG_USINGBUFFER) != 0) {
maskptr = (npy_bool *)buffers[maskop];
}
else {
maskptr = (npy_bool *)ad_ptrs[maskop];
}
PyMicArray_TransferMaskedStridedToNDim(ndim_transfer,
ad_ptrs[iop], dst_strides, axisdata_incr,
buffer, src_stride,
maskptr, strides[maskop],
dst_coords, axisdata_incr,
dst_shape, axisdata_incr,
op_transfersize, dtypes[iop]->elsize,
(PyMicArray_MaskedStridedUnaryOp *)stransfer,
transferdata, device);
}
/* Regular operand */
else {
PyMicArray_TransferStridedToNDim(ndim_transfer,
ad_ptrs[iop], dst_strides, axisdata_incr,
buffer, src_stride,
dst_coords, axisdata_incr,
dst_shape, axisdata_incr,
op_transfersize, dtypes[iop]->elsize,
stransfer,
transferdata, device);
}
}
/*
 * If there's no copy back, we may have to decrement refs. In
 * this case, the transfer function has a 'decsrcref' transfer
 * function, so we can use it to do the decrement.
 *
 * The flag USINGBUFFER is set when the buffer was used, so
 * only decrement refs when this flag is on.
 */
else if (stransfer != NULL &&
(op_itflags[iop]&NPY_OP_ITFLAG_USINGBUFFER) != 0) {
NPY_IT_DBG_PRINT1("Iterator: Freeing refs and zeroing buffer "
"of operand %d\n", (int)iop);
/* Decrement refs */
stransfer(NULL, 0, buffer, dtypes[iop]->elsize,
transfersize, dtypes[iop]->elsize,
transferdata, device);
/*
* Zero out the memory for safety. For instance,
* if during iteration some Python code copied an
* array pointing into the buffer, it will get None
* values for its references after this.
*/
target_memset(buffer, 0, dtypes[iop]->elsize*transfersize, device);
}
}
NPY_IT_DBG_PRINT("Iterator: Finished copying buffers to outputs\n");
}
/*
* This gets called after the iterator has been positioned to a multi-index
* for the start of a buffer. It decides which operands need a buffer,
* and copies the data into the buffers.
*/
NPY_NO_EXPORT void
mpyiter_copy_to_buffers(MpyIter *iter, char **prev_dataptrs)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int ndim = NIT_NDIM(iter);
int iop, nop = NIT_NOP(iter);
int device = NIT_DEVICE(iter);
npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter);
NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter);
NpyIter_AxisData *axisdata = NIT_AXISDATA(iter),
*reduce_outeraxisdata = NULL;
PyArray_Descr **dtypes = NIT_DTYPES(iter);
PyMicArrayObject **operands = NIT_OPERANDS(iter);
npy_intp *strides = NBF_STRIDES(bufferdata),
*ad_strides = NAD_STRIDES(axisdata);
npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
char **ptrs = NBF_PTRS(bufferdata), **ad_ptrs = NAD_PTRS(axisdata);
char **buffers = NBF_BUFFERS(bufferdata);
npy_intp iterindex, iterend, transfersize,
singlestridesize, reduce_innersize = 0, reduce_outerdim = 0;
int is_onestride = 0, any_buffered = 0;
npy_intp *reduce_outerstrides = NULL;
char **reduce_outerptrs = NULL;
PyMicArray_StridedUnaryOp *stransfer = NULL;
NpyAuxData *transferdata = NULL;
/*
* Have to get this flag before npyiter_checkreducesize sets
* it for the next iteration.
*/
npy_bool reuse_reduce_loops = (prev_dataptrs != NULL) &&
((itflags&NPY_ITFLAG_REUSE_REDUCE_LOOPS) != 0);
npy_intp axisdata_incr = NIT_AXISDATA_SIZEOF(itflags, ndim, nop) /
NPY_SIZEOF_INTP;
NPY_IT_DBG_PRINT("Iterator: Copying inputs to buffers\n");
/* Calculate the size if using any buffers */
iterindex = NIT_ITERINDEX(iter);
iterend = NIT_ITEREND(iter);
transfersize = NBF_BUFFERSIZE(bufferdata);
if (transfersize > iterend - iterindex) {
transfersize = iterend - iterindex;
}
/* If last time around, the reduce loop structure was full, we reuse it */
if (reuse_reduce_loops) {
npy_intp full_transfersize, prev_reduce_outersize;
prev_reduce_outersize = NBF_REDUCE_OUTERSIZE(bufferdata);
reduce_outerstrides = NBF_REDUCE_OUTERSTRIDES(bufferdata);
reduce_outerptrs = NBF_REDUCE_OUTERPTRS(bufferdata);
reduce_outerdim = NBF_REDUCE_OUTERDIM(bufferdata);
reduce_outeraxisdata = NIT_INDEX_AXISDATA(axisdata, reduce_outerdim);
reduce_innersize = NBF_SIZE(bufferdata);
NBF_REDUCE_POS(bufferdata) = 0;
/*
* Try to make the outersize as big as possible. This allows
* it to shrink when processing the last bit of the outer reduce loop,
* then grow again at the beginning of the next outer reduce loop.
*/
NBF_REDUCE_OUTERSIZE(bufferdata) = (NAD_SHAPE(reduce_outeraxisdata)-
NAD_INDEX(reduce_outeraxisdata));
full_transfersize = NBF_REDUCE_OUTERSIZE(bufferdata)*reduce_innersize;
/* If the full transfer size doesn't fit in the buffer, truncate it */
if (full_transfersize > NBF_BUFFERSIZE(bufferdata)) {
NBF_REDUCE_OUTERSIZE(bufferdata) = transfersize/reduce_innersize;
transfersize = NBF_REDUCE_OUTERSIZE(bufferdata)*reduce_innersize;
}
else {
transfersize = full_transfersize;
}
if (prev_reduce_outersize < NBF_REDUCE_OUTERSIZE(bufferdata)) {
/*
* If less data was copied the previous time around, it may not
* be safe to reuse the buffers even if the pointers match.
*/
reuse_reduce_loops = 0;
}
NBF_BUFITEREND(bufferdata) = iterindex + reduce_innersize;
NPY_IT_DBG_PRINT3("Reused reduce transfersize: %d innersize: %d "
"itersize: %d\n",
(int)transfersize,
(int)reduce_innersize,
(int)MpyIter_GetIterSize(iter));
NPY_IT_DBG_PRINT1("Reduced reduce outersize: %d",
(int)NBF_REDUCE_OUTERSIZE(bufferdata));
}
/*
* If there are any reduction operands, we may have to make
* the size smaller so we don't copy the same value into
* a buffer twice, as the buffering does not have a mechanism
* to combine values itself.
*/
else if (itflags&NPY_ITFLAG_REDUCE) {
NPY_IT_DBG_PRINT("Iterator: Calculating reduce loops\n");
transfersize = npyiter_checkreducesize(iter, transfersize,
&reduce_innersize,
&reduce_outerdim);
NPY_IT_DBG_PRINT3("Reduce transfersize: %d innersize: %d "
"itersize: %d\n",
(int)transfersize,
(int)reduce_innersize,
(int)MpyIter_GetIterSize(iter));
reduce_outerstrides = NBF_REDUCE_OUTERSTRIDES(bufferdata);
reduce_outerptrs = NBF_REDUCE_OUTERPTRS(bufferdata);
reduce_outeraxisdata = NIT_INDEX_AXISDATA(axisdata, reduce_outerdim);
NBF_SIZE(bufferdata) = reduce_innersize;
NBF_REDUCE_POS(bufferdata) = 0;
NBF_REDUCE_OUTERDIM(bufferdata) = reduce_outerdim;
NBF_BUFITEREND(bufferdata) = iterindex + reduce_innersize;
if (reduce_innersize == 0) {
NBF_REDUCE_OUTERSIZE(bufferdata) = 0;
return;
}
else {
NBF_REDUCE_OUTERSIZE(bufferdata) = transfersize/reduce_innersize;
}
}
else {
NBF_SIZE(bufferdata) = transfersize;
NBF_BUFITEREND(bufferdata) = iterindex + transfersize;
}
/* Calculate the maximum size if using a single stride and no buffers */
singlestridesize = NAD_SHAPE(axisdata)-NAD_INDEX(axisdata);
if (singlestridesize > iterend - iterindex) {
singlestridesize = iterend - iterindex;
}
if (singlestridesize >= transfersize) {
is_onestride = 1;
}
for (iop = 0; iop < nop; ++iop) {
/*
* If the buffer is write-only, these two are NULL, and the buffer
* pointers will be set up but the read copy won't be done
*/
stransfer = NBF_READTRANSFERFN(bufferdata)[iop];
transferdata = NBF_READTRANSFERDATA(bufferdata)[iop];
switch (op_itflags[iop]&
(NPY_OP_ITFLAG_BUFNEVER|
NPY_OP_ITFLAG_CAST|
NPY_OP_ITFLAG_REDUCE)) {
/* Never need to buffer this operand */
case NPY_OP_ITFLAG_BUFNEVER:
ptrs[iop] = ad_ptrs[iop];
if (itflags&NPY_ITFLAG_REDUCE) {
reduce_outerstrides[iop] = reduce_innersize *
strides[iop];
reduce_outerptrs[iop] = ptrs[iop];
}
/*
* Should not adjust the stride - ad_strides[iop]
* could be zero, but strides[iop] was initialized
* to the first non-trivial stride.
*/
stransfer = NULL;
/* The flag NPY_OP_ITFLAG_USINGBUFFER can be ignored here */
break;
/* Never need to buffer this operand */
case NPY_OP_ITFLAG_BUFNEVER|NPY_OP_ITFLAG_REDUCE:
ptrs[iop] = ad_ptrs[iop];
reduce_outerptrs[iop] = ptrs[iop];
reduce_outerstrides[iop] = 0;
/*
* Should not adjust the stride - ad_strides[iop]
* could be zero, but strides[iop] was initialized
* to the first non-trivial stride.
*/
stransfer = NULL;
/* The flag NPY_OP_ITFLAG_USINGBUFFER can be ignored here */
break;
/* Just a copy */
case 0:
/* Do not reuse buffer if it did not exist */
if (!(op_itflags[iop] & NPY_OP_ITFLAG_USINGBUFFER) &&
(prev_dataptrs != NULL)) {
prev_dataptrs[iop] = NULL;
}
/*
* No copyswap or cast was requested, so all we're
* doing is copying the data to fill the buffer and
* produce a single stride. If the underlying data
* already does that, no need to copy it.
*/
if (is_onestride) {
ptrs[iop] = ad_ptrs[iop];
strides[iop] = ad_strides[iop];
stransfer = NULL;
/* Signal that the buffer is not being used */
op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER);
}
/* If some other op is reduced, we have a double reduce loop */
else if ((itflags&NPY_ITFLAG_REDUCE) &&
(reduce_outerdim == 1) &&
(transfersize/reduce_innersize <=
NAD_SHAPE(reduce_outeraxisdata) -
NAD_INDEX(reduce_outeraxisdata))) {
ptrs[iop] = ad_ptrs[iop];
reduce_outerptrs[iop] = ptrs[iop];
strides[iop] = ad_strides[iop];
reduce_outerstrides[iop] =
NAD_STRIDES(reduce_outeraxisdata)[iop];
stransfer = NULL;
/* Signal that the buffer is not being used */
op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER);
}
else {
/* In this case, the buffer is being used */
ptrs[iop] = buffers[iop];
strides[iop] = dtypes[iop]->elsize;
if (itflags&NPY_ITFLAG_REDUCE) {
reduce_outerstrides[iop] = reduce_innersize *
strides[iop];
reduce_outerptrs[iop] = ptrs[iop];
}
/* Signal that the buffer is being used */
op_itflags[iop] |= NPY_OP_ITFLAG_USINGBUFFER;
}
break;
/* Just a copy, but with a reduction */
case NPY_OP_ITFLAG_REDUCE:
/* Do not reuse buffer if it did not exist */
if (!(op_itflags[iop] & NPY_OP_ITFLAG_USINGBUFFER) &&
(prev_dataptrs != NULL)) {
prev_dataptrs[iop] = NULL;
}
if (ad_strides[iop] == 0) {
strides[iop] = 0;
/* It's all in one stride in the inner loop dimension */
if (is_onestride) {
NPY_IT_DBG_PRINT1("reduce op %d all one stride\n", (int)iop);
ptrs[iop] = ad_ptrs[iop];
reduce_outerstrides[iop] = 0;
stransfer = NULL;
/* Signal that the buffer is not being used */
op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER);
}
/* It's all in one stride in the reduce outer loop */
else if ((reduce_outerdim > 0) &&
(transfersize/reduce_innersize <=
NAD_SHAPE(reduce_outeraxisdata) -
NAD_INDEX(reduce_outeraxisdata))) {
NPY_IT_DBG_PRINT1("reduce op %d all one outer stride\n",
(int)iop);
ptrs[iop] = ad_ptrs[iop];
/* Outer reduce loop advances by one item */
reduce_outerstrides[iop] =
NAD_STRIDES(reduce_outeraxisdata)[iop];
stransfer = NULL;
/* Signal that the buffer is not being used */
op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER);
}
/* In this case, the buffer is being used */
else {
NPY_IT_DBG_PRINT1("reduce op %d must buffer\n", (int)iop);
ptrs[iop] = buffers[iop];
/* Both outer and inner reduce loops have stride 0 */
if (NAD_STRIDES(reduce_outeraxisdata)[iop] == 0) {
reduce_outerstrides[iop] = 0;
}
/* Outer reduce loop advances by one item */
else {
reduce_outerstrides[iop] = dtypes[iop]->elsize;
}
/* Signal that the buffer is being used */
op_itflags[iop] |= NPY_OP_ITFLAG_USINGBUFFER;
}
}
else if (is_onestride) {
NPY_IT_DBG_PRINT1("reduce op %d all one stride in dim 0\n", (int)iop);
ptrs[iop] = ad_ptrs[iop];
strides[iop] = ad_strides[iop];
reduce_outerstrides[iop] = 0;
stransfer = NULL;
/* Signal that the buffer is not being used */
op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER);
}
else {
/* It's all in one stride in the reduce outer loop */
if ((reduce_outerdim == 1) &&
(transfersize/reduce_innersize <=
NAD_SHAPE(reduce_outeraxisdata) -
NAD_INDEX(reduce_outeraxisdata))) {
ptrs[iop] = ad_ptrs[iop];
strides[iop] = ad_strides[iop];
/* Outer reduce loop advances by one item */
reduce_outerstrides[iop] =
NAD_STRIDES(reduce_outeraxisdata)[iop];
stransfer = NULL;
/* Signal that the buffer is not being used */
op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER);
}
/* In this case, the buffer is being used */
else {
ptrs[iop] = buffers[iop];
strides[iop] = dtypes[iop]->elsize;
if (NAD_STRIDES(reduce_outeraxisdata)[iop] == 0) {
/* Reduction in outer reduce loop */
reduce_outerstrides[iop] = 0;
}
else {
/* Advance to next items in outer reduce loop */
reduce_outerstrides[iop] = reduce_innersize *
dtypes[iop]->elsize;
}
/* Signal that the buffer is being used */
op_itflags[iop] |= NPY_OP_ITFLAG_USINGBUFFER;
}
}
reduce_outerptrs[iop] = ptrs[iop];
break;
default:
/* In this case, the buffer is always being used */
any_buffered = 1;
/* Signal that the buffer is being used */
op_itflags[iop] |= NPY_OP_ITFLAG_USINGBUFFER;
if (!(op_itflags[iop]&NPY_OP_ITFLAG_REDUCE)) {
ptrs[iop] = buffers[iop];
strides[iop] = dtypes[iop]->elsize;
if (itflags&NPY_ITFLAG_REDUCE) {
reduce_outerstrides[iop] = reduce_innersize *
strides[iop];
reduce_outerptrs[iop] = ptrs[iop];
}
}
/* The buffer is being used with reduction */
else {
ptrs[iop] = buffers[iop];
if (ad_strides[iop] == 0) {
NPY_IT_DBG_PRINT1("cast op %d has innermost stride 0\n", (int)iop);
strides[iop] = 0;
/* Both outer and inner reduce loops have stride 0 */
if (NAD_STRIDES(reduce_outeraxisdata)[iop] == 0) {
NPY_IT_DBG_PRINT1("cast op %d has outermost stride 0\n", (int)iop);
reduce_outerstrides[iop] = 0;
}
/* Outer reduce loop advances by one item */
else {
NPY_IT_DBG_PRINT1("cast op %d has outermost stride !=0\n", (int)iop);
reduce_outerstrides[iop] = dtypes[iop]->elsize;
}
}
else {
NPY_IT_DBG_PRINT1("cast op %d has innermost stride !=0\n", (int)iop);
strides[iop] = dtypes[iop]->elsize;
if (NAD_STRIDES(reduce_outeraxisdata)[iop] == 0) {
NPY_IT_DBG_PRINT1("cast op %d has outermost stride 0\n", (int)iop);
/* Reduction in outer reduce loop */
reduce_outerstrides[iop] = 0;
}
else {
NPY_IT_DBG_PRINT1("cast op %d has outermost stride !=0\n", (int)iop);
/* Advance to next items in outer reduce loop */
reduce_outerstrides[iop] = reduce_innersize *
dtypes[iop]->elsize;
}
}
reduce_outerptrs[iop] = ptrs[iop];
}
break;
}
if (stransfer != NULL) {
npy_intp src_itemsize;
npy_intp op_transfersize;
npy_intp dst_stride, *src_strides, *src_coords, *src_shape;
int ndim_transfer;
npy_bool skip_transfer = 0;
src_itemsize = PyMicArray_DTYPE(operands[iop])->elsize;
/* If stransfer wasn't set to NULL, buffering is required */
any_buffered = 1;
/*
* If this operand is being reduced in the inner loop,
* set its buffering stride to zero, and just copy
* one element.
*/
if (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE) {
if (ad_strides[iop] == 0) {
strides[iop] = 0;
if (reduce_outerstrides[iop] == 0) {
op_transfersize = 1;
dst_stride = 0;
src_strides = &dst_stride;
src_coords = &NAD_INDEX(reduce_outeraxisdata);
src_shape = &NAD_SHAPE(reduce_outeraxisdata);
ndim_transfer = 1;
/*
* When we're reducing a single element, and
* it's still the same element, don't overwrite
* it even when reuse reduce loops is unset.
* This preserves the precision of the
* intermediate calculation.
*/
if (prev_dataptrs &&
prev_dataptrs[iop] == ad_ptrs[iop]) {
NPY_IT_DBG_PRINT1("Iterator: skipping operand %d"
" copy because it's a 1-element reduce\n",
(int)iop);
skip_transfer = 1;
}
}
else {
op_transfersize = NBF_REDUCE_OUTERSIZE(bufferdata);
dst_stride = reduce_outerstrides[iop];
src_strides = &NAD_STRIDES(reduce_outeraxisdata)[iop];
src_coords = &NAD_INDEX(reduce_outeraxisdata);
src_shape = &NAD_SHAPE(reduce_outeraxisdata);
ndim_transfer = ndim - reduce_outerdim;
}
}
else {
if (reduce_outerstrides[iop] == 0) {
op_transfersize = NBF_SIZE(bufferdata);
dst_stride = strides[iop];
src_strides = &ad_strides[iop];
src_coords = &NAD_INDEX(axisdata);
src_shape = &NAD_SHAPE(axisdata);
ndim_transfer = reduce_outerdim ? reduce_outerdim : 1;
}
else {
op_transfersize = transfersize;
dst_stride = strides[iop];
src_strides = &ad_strides[iop];
src_coords = &NAD_INDEX(axisdata);
src_shape = &NAD_SHAPE(axisdata);
ndim_transfer = ndim;
}
}
}
else {
op_transfersize = transfersize;
dst_stride = strides[iop];
src_strides = &ad_strides[iop];
src_coords = &NAD_INDEX(axisdata);
src_shape = &NAD_SHAPE(axisdata);
ndim_transfer = ndim;
}
/*
* If the whole buffered loop structure remains the same,
* and the source pointer for this data didn't change,
* we don't have to copy the data again.
*/
if (reuse_reduce_loops && prev_dataptrs[iop] == ad_ptrs[iop]) {
NPY_IT_DBG_PRINT2("Iterator: skipping operands %d "
"copy (%d items) because loops are reused and the data "
"pointer didn't change\n",
(int)iop, (int)op_transfersize);
skip_transfer = 1;
}
/* If the data type requires zero-initialization */
if (PyDataType_FLAGCHK(dtypes[iop], NPY_NEEDS_INIT)) {
NPY_IT_DBG_PRINT("Iterator: Buffer requires init, "
"memsetting to 0\n");
target_memset(ptrs[iop], 0, dtypes[iop]->elsize*op_transfersize, device);
/* Can't skip the transfer in this case */
skip_transfer = 0;
}
if (!skip_transfer) {
NPY_IT_DBG_PRINT2("Iterator: Copying operand %d to "
"buffer (%d items)\n",
(int)iop, (int)op_transfersize);
PyMicArray_TransferNDimToStrided(ndim_transfer,
ptrs[iop], dst_stride,
ad_ptrs[iop], src_strides, axisdata_incr,
src_coords, axisdata_incr,
src_shape, axisdata_incr,
op_transfersize, src_itemsize,
stransfer,
transferdata, device);
}
}
else if (ptrs[iop] == buffers[iop]) {
/* If the data type requires zero-initialization */
if (PyDataType_FLAGCHK(dtypes[iop], NPY_NEEDS_INIT)) {
NPY_IT_DBG_PRINT1("Iterator: Write-only buffer for "
"operand %d requires init, "
"memsetting to 0\n", (int)iop);
target_memset(ptrs[iop], 0, dtypes[iop]->elsize*transfersize, device);
}
}
}
/*
* If buffering wasn't needed, we can grow the inner
* loop to as large as possible.
*
* TODO: Could grow REDUCE loop too with some more logic above.
*/
if (!any_buffered && (itflags&NPY_ITFLAG_GROWINNER) &&
!(itflags&NPY_ITFLAG_REDUCE)) {
if (singlestridesize > transfersize) {
NPY_IT_DBG_PRINT2("Iterator: Expanding inner loop size "
"from %d to %d since buffering wasn't needed\n",
(int)NBF_SIZE(bufferdata), (int)singlestridesize);
NBF_SIZE(bufferdata) = singlestridesize;
NBF_BUFITEREND(bufferdata) = iterindex + singlestridesize;
}
}
NPY_IT_DBG_PRINT1("Any buffering needed: %d\n", any_buffered);
NPY_IT_DBG_PRINT1("Iterator: Finished copying inputs to buffers "
"(buffered size is %d)\n", (int)NBF_SIZE(bufferdata));
}
/*
* This checks how much space can be buffered without encountering the
* same value twice, or for operands whose innermost stride is zero,
* without encountering a different value. By reducing the buffered
* amount to this size, reductions can be safely buffered.
*
* Reductions are buffered with two levels of looping, to avoid
* frequent copying to the buffers. The return value is the overall
* buffer size, and when the flag NPY_ITFLAG_REDUCE is set, reduce_innersize
* receives the size of the inner of the two levels of looping.
*
* The value placed in reduce_outerdim is the index into the AXISDATA
* for where the second level of the double loop begins.
*
* The return value is always a multiple of the value placed in
* reduce_innersize.
*/
static npy_intp
npyiter_checkreducesize(MpyIter *iter, npy_intp count,
npy_intp *reduce_innersize,
npy_intp *reduce_outerdim)
{
/* TODO: reduce */
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
int iop, nop = NIT_NOP(iter);
NpyIter_AxisData *axisdata;
npy_intp sizeof_axisdata;
npy_intp coord, shape, *strides;
npy_intp reducespace = 1, factor;
npy_bool nonzerocoord;
npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter);
char stride0op[NPY_MAXARGS];
/* Default to no outer axis */
*reduce_outerdim = 0;
/* If there's only one dimension, no need to calculate anything */
if (ndim == 1 || count == 0) {
*reduce_innersize = count;
return count;
}
sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
axisdata = NIT_AXISDATA(iter);
/* Indicate which REDUCE operands have stride 0 in the inner loop */
strides = NAD_STRIDES(axisdata);
for (iop = 0; iop < nop; ++iop) {
stride0op[iop] = (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE) &&
(strides[iop] == 0);
NPY_IT_DBG_PRINT2("Iterator: Operand %d has stride 0 in "
"the inner loop? %d\n", iop, (int)stride0op[iop]);
}
shape = NAD_SHAPE(axisdata);
coord = NAD_INDEX(axisdata);
reducespace += (shape-coord-1);
factor = shape;
NIT_ADVANCE_AXISDATA(axisdata, 1);
/* Initialize nonzerocoord based on the first coordinate */
nonzerocoord = (coord != 0);
/* Go forward through axisdata, calculating the space available */
for (idim = 1; idim < ndim && reducespace < count;
++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) {
NPY_IT_DBG_PRINT2("Iterator: inner loop reducespace %d, count %d\n",
(int)reducespace, (int)count);
strides = NAD_STRIDES(axisdata);
for (iop = 0; iop < nop; ++iop) {
/*
* If a reduce stride switched from zero to non-zero, or
* vice versa, that's the point where the data will stop
* being the same element or will repeat, and if the
* buffer starts with an all-zero multi-index up to this
* point, that gives us the reduce_innersize.
*/
if((stride0op[iop] && (strides[iop] != 0)) ||
(!stride0op[iop] &&
(strides[iop] == 0) &&
(op_itflags[iop]&NPY_OP_ITFLAG_REDUCE))) {
NPY_IT_DBG_PRINT1("Iterator: Reduce operation limits "
"buffer to %d\n", (int)reducespace);
/*
* If we already found more elements than count, or
* the starting coordinate wasn't zero, the two-level
* looping is unnecessary/can't be done, so return.
*/
if (count <= reducespace) {
*reduce_innersize = count;
NIT_ITFLAGS(iter) |= NPY_ITFLAG_REUSE_REDUCE_LOOPS;
return count;
}
else if (nonzerocoord) {
if (reducespace < count) {
count = reducespace;
}
*reduce_innersize = count;
/* NOTE: This is similar to the (coord != 0) case below. */
NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_REUSE_REDUCE_LOOPS;
return count;
}
else {
*reduce_innersize = reducespace;
break;
}
}
}
/* If we broke out of the loop early, we found reduce_innersize */
if (iop != nop) {
NPY_IT_DBG_PRINT2("Iterator: Found first dim not "
"reduce (%d of %d)\n", iop, nop);
break;
}
shape = NAD_SHAPE(axisdata);
coord = NAD_INDEX(axisdata);
if (coord != 0) {
nonzerocoord = 1;
}
reducespace += (shape-coord-1) * factor;
factor *= shape;
}
/*
* If there was any non-zero coordinate, the reduction inner
* loop doesn't fit in the buffersize, or the reduction inner loop
* covered the entire iteration size, can't do the double loop.
*/
if (nonzerocoord || count < reducespace || idim == ndim) {
if (reducespace < count) {
count = reducespace;
}
*reduce_innersize = count;
/* In this case, we can't reuse the reduce loops */
NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_REUSE_REDUCE_LOOPS;
return count;
}
coord = NAD_INDEX(axisdata);
if (coord != 0) {
/*
* In this case, it is only safe to reuse the buffer if the amount
* of data copied is not more than the current axes, as is the
* case when reuse_reduce_loops was active already.
* It should in principle be OK when the idim loop returns immediately.
*/
NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_REUSE_REDUCE_LOOPS;
}
else {
/* In this case, we can reuse the reduce loops */
NIT_ITFLAGS(iter) |= NPY_ITFLAG_REUSE_REDUCE_LOOPS;
}
*reduce_innersize = reducespace;
count /= reducespace;
NPY_IT_DBG_PRINT2("Iterator: reduce_innersize %d count /ed %d\n",
(int)reducespace, (int)count);
/*
* Continue through the rest of the dimensions. If there are
* two separated reduction axes, we may have to cut the buffer
* short again.
*/
*reduce_outerdim = idim;
reducespace = 1;
factor = 1;
/* Indicate which REDUCE operands have stride 0 at the current level */
strides = NAD_STRIDES(axisdata);
for (iop = 0; iop < nop; ++iop) {
stride0op[iop] = (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE) &&
(strides[iop] == 0);
NPY_IT_DBG_PRINT2("Iterator: Operand %d has stride 0 in "
"the outer loop? %d\n", iop, (int)stride0op[iop]);
}
shape = NAD_SHAPE(axisdata);
reducespace += (shape-coord-1) * factor;
factor *= shape;
NIT_ADVANCE_AXISDATA(axisdata, 1);
++idim;
for (; idim < ndim && reducespace < count;
++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) {
NPY_IT_DBG_PRINT2("Iterator: outer loop reducespace %d, count %d\n",
(int)reducespace, (int)count);
strides = NAD_STRIDES(axisdata);
for (iop = 0; iop < nop; ++iop) {
/*
* If a reduce stride switched from zero to non-zero, or
* vice versa, that's the point where the data will stop
* being the same element or will start to repeat. If the
* buffer starts with an all-zero multi-index up to this
* point, this gives us the reduce_innersize.
*/
if((stride0op[iop] && (strides[iop] != 0)) ||
(!stride0op[iop] &&
(strides[iop] == 0) &&
(op_itflags[iop]&NPY_OP_ITFLAG_REDUCE))) {
NPY_IT_DBG_PRINT1("Iterator: Reduce operation limits "
"buffer to %d\n", (int)reducespace);
/*
* This terminates the outer level of our double loop.
*/
if (count <= reducespace) {
return count * (*reduce_innersize);
}
else {
return reducespace * (*reduce_innersize);
}
}
}
shape = NAD_SHAPE(axisdata);
coord = NAD_INDEX(axisdata);
if (coord != 0) {
nonzerocoord = 1;
}
reducespace += (shape-coord-1) * factor;
factor *= shape;
}
if (reducespace < count) {
count = reducespace;
}
return count * (*reduce_innersize);
}
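/*
* Worked example (illustrative, not taken from the code above): suppose
* npyiter_checkreducesize() determines reduce_innersize = 8 but the caller
* asked for count = 100. Because the return value must be a multiple of
* reduce_innersize, count is divided down to 100/8 = 12 outer iterations,
* and (barring a second reduce axis cutting it shorter) the function
* returns 12 * 8 = 96: the buffers are then refilled once per 96 elements
* instead of once per 8.
*/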
/* This is called when an iterator is updated on the host.
* It transfers the iterator data to the corresponding
* iterator on the MIC device.
*/
NPY_NO_EXPORT void mpyiter_update_offiter(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int ndim = NIT_NDIM(iter);
int nop = NIT_NOP(iter);
void *offiter = (void *) NIT_OFFITER(iter);
target_memcpy(offiter, (void *)iter,
NIT_SIZEOF_ITERATOR(itflags, ndim, nop),
NIT_DEVICE(iter), CPU_DEVICE);
}
/* This function is called when the iterator's bufferdata is
* updated on the host.
* Initially intended to be used in the internext functions.
*/
NPY_NO_EXPORT void mpyiter_update_offbuffdata(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int ndim = NIT_NDIM(iter);
int nop = NIT_NOP(iter);
MpyIter *offiter = NIT_OFFITER(iter);
target_memcpy(NIT_BUFFERDATA(offiter), NIT_BUFFERDATA(iter),
NIT_BUFFERDATA_SIZEOF(itflags, ndim, nop),
NIT_DEVICE(iter), CPU_DEVICE);
}
/* This function is called when the iterator's axisdata is
* updated on the host.
* Initially intended to be used in the internext functions.
*/
NPY_NO_EXPORT void mpyiter_update_offaxisdata(MpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int ndim = NIT_NDIM(iter);
int nop = NIT_NOP(iter);
MpyIter *offiter = NIT_OFFITER(iter);
target_memcpy(NIT_AXISDATA(offiter), NIT_AXISDATA(iter),
NIT_AXISDATA_SIZEOF(itflags, ndim, nop)*(ndim ? ndim : 1),
NIT_DEVICE(iter), CPU_DEVICE);
}
NPY_NO_EXPORT void MpyIter_UpdateOffIter(MpyIter *iter)
{
mpyiter_update_offiter(iter);
}
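/*
* Usage sketch (illustrative; the internext driver itself is not part of
* this file): after host-side code mutates the iterator in place, only the
* sub-structures that actually changed need to be mirrored to the device:
*
*     ... advance NIT_BUFFERDATA(iter) and NIT_AXISDATA(iter) on the host ...
*     mpyiter_update_offbuffdata(iter);   // mirror bufferdata only
*     mpyiter_update_offaxisdata(iter);   // mirror axisdata only
*
* MpyIter_UpdateOffIter(iter) copies the entire iterator and is the
* conservative, more expensive fallback.
*/
/* Hedged reading: taking the address of MpyIter_IsFirstVisit inside a
* target region forces the offload compiler to emit device-side code for
* this translation unit; the function below is never called at runtime.
*/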
static void dummyOffloadBuild(void)
{
#pragma omp target
{
void *f;
f = &MpyIter_IsFirstVisit;
}
}
#undef MPY_ITERATOR_IMPLEMENTATION_CODE
|
main.c | #include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define N 1000
#define Eps 1e-7
#pragma omp declare target
void func_1v(float*, float*, unsigned);
void func_2v(float*, float*, unsigned);
void func_3v(float*, float*, unsigned);
#pragma omp end declare target
void hfunc0(float*, float*, unsigned);
void hfunc1(float*, float*, unsigned);
void hfunc2(float*, float*, unsigned);
void hfunc3(float*, float*, unsigned);
int main(){
float a[N], t1[N], t2[N], s = 0;
unsigned i;
unsigned nErr = 0;
srand((unsigned int)time(NULL));
#pragma omp parallel for
for(i=0; i<N; ++i){
a[i]=rand()%100;
}
func_1v(a,t1,N);
func_3v(a,t2,N);
#pragma omp parallel for reduction(+:s)
for(i=0; i<N; ++i) s += t1[i];
if(s < Eps){
printf("Check 0: All elemets are zeros!\n");
return -1;
}
for(i=0; i<N; ++i){
if(fabs(t1[i]-t2[i]) >= Eps){
++nErr;
printf("Check 1: error at %d: %e >= %e\n",i,fabs(t1[i]-t2[i]),Eps);
}
}
func_2v(t1,t2,N);
for(i=0; i<N; ++i){
if(fabs(a[i]-t2[i]) >= Eps){
++nErr;
printf("Check 2: error at %d: %e >= %e\n",i,fabs(a[i]-t2[i]),Eps);
}
}
hfunc0(a, t1, N);
hfunc1(a, t1, N);
hfunc3(a, t2, N);
hfunc2(t1, t2, N);
if(!nErr) printf("Success\n");
return nErr;
}
|
GB_binop__ge_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ge_fp64
// A.*B function (eWiseMult): GB_AemultB__ge_fp64
// A*D function (colscale): GB_AxD__ge_fp64
// D*A function (rowscale): GB_DxB__ge_fp64
// C+=B function (dense accum): GB_Cdense_accumB__ge_fp64
// C+=b function (dense accum): GB_Cdense_accumb__ge_fp64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ge_fp64
// C=scalar+B GB_bind1st__ge_fp64
// C=scalar+B' GB_bind1st_tran__ge_fp64
// C=A+scalar GB_bind2nd__ge_fp64
// C=A'+scalar GB_bind2nd_tran__ge_fp64
// C type: bool
// A type: double
// B,b type: double
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_FP64 || GxB_NO_GE_FP64)
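// Illustrative sketch (not part of the generated kernel): inside the
// templates included below, the macros above compose per entry roughly as:
//
//      GB_GETA (aij, Ax, p) ;                   // double aij = Ax [p]
//      GB_GETB (bij, Bx, p) ;                   // double bij = Bx [p]
//      GB_BINOP (GB_CX (p), aij, bij, i, j) ;   // Cx [p] = (aij >= bij)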
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__ge_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__ge_fp64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__ge_fp64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__ge_fp64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__ge_fp64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__ge_fp64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__ge_fp64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__ge_fp64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__ge_fp64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = Ax [p] ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB_bind1st_tran__ge_fp64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB_bind2nd_tran__ge_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__first_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__first_uint32
// A.*B function (eWiseMult): GB_AemultB__first_uint32
// A*D function (colscale): GB_AxD__first_uint32
// D*A function (rowscale): GB_DxB__first_uint32
// C+=B function (dense accum): GB_Cdense_accumB__first_uint32
// C+=b function (dense accum): GB_Cdense_accumb__first_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__first_uint32
// C=scalar+B GB_bind1st__first_uint32
// C=scalar+B' GB_bind1st_tran__first_uint32
// C=A+scalar (none)
// C=A'+scalar (none)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = x ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_UINT32 || GxB_NO_FIRST_UINT32)
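// Illustrative sketch (not part of the generated kernel): FIRST ignores its
// second argument, so GB_GETB above expands to a bare ";" and B is never
// read. Per entry, the templates below reduce roughly to:
//
//      GB_GETA (aij, Ax, p) ;                   // uint32_t aij = Ax [p]
//      GB_GETB (bij, Bx, p) ;                   // expands to just ";"
//      GB_BINOP (GB_CX (p), aij, bij, i, j) ;   // Cx [p] = aij
//
// which is why stray "; ;" statements appear in the kernels below.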
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__first_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__first_uint32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__first_uint32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__first_uint32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__first_uint32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__first_uint32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__first_uint32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__first_uint32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = Ax [p] ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB_bind1st_tran__first_uint32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
task_untied4.c | // Contributed by Allan Porterfield
// 1/26/2010
void lu0(float *);
void fwd(float *, float*);
void sparselu_par_call(float **BENCH)
{
int jj, kk;
#pragma omp parallel
{
#pragma omp single nowait
#pragma omp task untied
{
for (kk=0; kk<100; kk++)
{
lu0(BENCH[kk]);
for (jj=kk+1; jj<100; jj++)
{
#pragma omp task untied firstprivate(kk, jj) shared(BENCH)
if (BENCH[jj] != 0)
{
fwd(BENCH[kk], BENCH[jj]);
}
}
}
#pragma omp taskwait
}
}
}
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/property.h"
#include "magick/animate.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/image-private.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/timer.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies an arithmetic, relational, or logical operator,
% together with a value, to an image. Use these operations to lighten or
% darken an image, to increase or decrease contrast in an image, or to
% produce the "negative" of an image.
%
% The format of the EvaluateImageChannel method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImageChannel(Image *image,
% const ChannelType channel,const MagickEvaluateOperator op,
% const double value,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o op: the evaluate operator to apply.
%
% o value: the value used with the operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
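/*
Usage sketch (illustrative; assumes `image` has already been read or
constructed elsewhere): darken an image by halving every channel value.

ExceptionInfo
*exception;

exception=AcquireExceptionInfo();
(void) EvaluateImageChannel(image,CompositeChannels,
MultiplyEvaluateOperator,0.5,exception);
exception=DestroyExceptionInfo(exception);
*/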
static MagickPixelPacket **DestroyPixelThreadSet(MagickPixelPacket **pixels)
{
register ssize_t
i;
assert(pixels != (MagickPixelPacket **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (MagickPixelPacket *) NULL)
pixels[i]=(MagickPixelPacket *) RelinquishMagickMemory(pixels[i]);
pixels=(MagickPixelPacket **) RelinquishMagickMemory(pixels);
return(pixels);
}
static MagickPixelPacket **AcquirePixelThreadSet(const Image *image,
const size_t number_images)
{
register ssize_t
i,
j;
MagickPixelPacket
**pixels;
size_t
length,
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(MagickPixelPacket **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (MagickPixelPacket **) NULL)
return((MagickPixelPacket **) NULL);
(void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
length=image->columns;
if (length < number_images)
length=number_images;
pixels[i]=(MagickPixelPacket *) AcquireQuantumMemory(length,
sizeof(**pixels));
if (pixels[i] == (MagickPixelPacket *) NULL)
return(DestroyPixelThreadSet(pixels));
for (j=0; j < (ssize_t) length; j++)
GetMagickPixelPacket(image,&pixels[i][j]);
}
return(pixels);
}
static inline double EvaluateMax(const double x,const double y)
{
if (x > y)
return(x);
return(y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
const MagickPixelPacket
*color_1,
*color_2;
int
intensity;
color_1=(const MagickPixelPacket *) x;
color_2=(const MagickPixelPacket *) y;
intensity=(int) MagickPixelIntensity(color_2)-
(int) MagickPixelIntensity(color_1);
return(intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static inline double MagickMin(const double x,const double y)
{
if (x < y)
return(x);
return(y);
}
static MagickRealType ApplyEvaluateOperator(RandomInfo *random_info,
const Quantum pixel,const MagickEvaluateOperator op,
const MagickRealType value)
{
MagickRealType
result;
result=0.0;
switch (op)
{
case UndefinedEvaluateOperator:
break;
case AbsEvaluateOperator:
{
result=(MagickRealType) fabs((double) (pixel+value));
break;
}
case AddEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case AddModulusEvaluateOperator:
{
/*
This returns a 'floored modulus' of the addition, which is always a
non-negative result. It differs from % or fmod(), which return a
'truncated modulus' result in which floor() is replaced by trunc()
and which could therefore be negative (and would be clipped).
*/
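/*
For example (illustrative), with QuantumRange+1 == 65536 and
pixel+value == -10: floor(-10.0/65536.0) == -1, so the result is
-10 - 65536*(-1) == 65526, whereas fmod() would have yielded -10.
*/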
result=pixel+value;
result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
break;
}
case AndEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel & (size_t) (value+0.5));
break;
}
case CosineEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case DivideEvaluateOperator:
{
result=pixel/(value == 0.0 ? 1.0 : value);
break;
}
case ExponentialEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*exp((double) (value*QuantumScale*
pixel)));
break;
}
case GaussianNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
GaussianNoise,value);
break;
}
case ImpulseNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
ImpulseNoise,value);
break;
}
case LaplacianNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
LaplacianNoise,value);
break;
}
case LeftShiftEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel << (size_t) (value+0.5));
break;
}
case LogEvaluateOperator:
{
if ((QuantumScale*pixel) >= MagickEpsilon)
result=(MagickRealType) (QuantumRange*log((double) (QuantumScale*value*
pixel+1.0))/log((double) (value+1.0)));
break;
}
case MaxEvaluateOperator:
{
result=(MagickRealType) EvaluateMax((double) pixel,value);
break;
}
case MeanEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case MedianEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case MinEvaluateOperator:
{
result=(MagickRealType) MagickMin((double) pixel,value);
break;
}
case MultiplicativeNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
MultiplicativeGaussianNoise,value);
break;
}
case MultiplyEvaluateOperator:
{
result=(MagickRealType) (value*pixel);
break;
}
case OrEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel | (size_t) (value+0.5));
break;
}
case PoissonNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
PoissonNoise,value);
break;
}
case PowEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*pow((double) (QuantumScale*pixel),
(double) value));
break;
}
case RightShiftEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel >> (size_t) (value+0.5));
break;
}
case SetEvaluateOperator:
{
result=value;
break;
}
case SineEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case SubtractEvaluateOperator:
{
result=(MagickRealType) (pixel-value);
break;
}
case SumEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case ThresholdEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 :
QuantumRange);
break;
}
case ThresholdBlackEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : pixel);
break;
}
case ThresholdWhiteEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel > value) ? QuantumRange :
pixel);
break;
}
case UniformNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
UniformNoise,value);
break;
}
case XorEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel ^ (size_t) (value+0.5));
break;
}
}
return(result);
}
MagickExport MagickBooleanType EvaluateImage(Image *image,
const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=EvaluateImageChannel(image,CompositeChannels,op,value,exception);
return(status);
}
MagickExport Image *EvaluateImages(const Image *images,
const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag "Evaluate/Image"
CacheView
*evaluate_view;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
**restrict evaluate_pixels,
zero;
RandomInfo
**restrict random_info;
size_t
number_images;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(images != (Image *) NULL);
assert(images->signature == MagickSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=CloneImage(images,images->columns,images->rows,MagickTrue,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
image=DestroyImage(image);
return((Image *) NULL);
}
number_images=GetImageListLength(images);
evaluate_pixels=AcquirePixelThreadSet(images,number_images);
if (evaluate_pixels == (MagickPixelPacket **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Evaluate image pixels.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(images,&zero);
random_info=AcquireRandomInfoThreadSet();
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#endif
evaluate_view=AcquireAuthenticCacheView(image,exception);
if (op == MedianEvaluateOperator)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register IndexPacket
*restrict evaluate_indexes;
register MagickPixelPacket
*evaluate_pixel;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,
image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) number_images; i++)
evaluate_pixel[i]=zero;
next=images;
for (i=0; i < (ssize_t) number_images; i++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
evaluate_pixel[i].red=ApplyEvaluateOperator(random_info[id],
GetPixelRed(p),op,evaluate_pixel[i].red);
evaluate_pixel[i].green=ApplyEvaluateOperator(random_info[id],
GetPixelGreen(p),op,evaluate_pixel[i].green);
evaluate_pixel[i].blue=ApplyEvaluateOperator(random_info[id],
GetPixelBlue(p),op,evaluate_pixel[i].blue);
evaluate_pixel[i].opacity=ApplyEvaluateOperator(random_info[id],
GetPixelOpacity(p),op,evaluate_pixel[i].opacity);
if (image->colorspace == CMYKColorspace)
evaluate_pixel[i].index=ApplyEvaluateOperator(random_info[id],
*indexes,op,evaluate_pixel[i].index);
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
IntensityCompare);
SetPixelRed(q,ClampToQuantum(evaluate_pixel[i/2].red));
SetPixelGreen(q,ClampToQuantum(evaluate_pixel[i/2].green));
SetPixelBlue(q,ClampToQuantum(evaluate_pixel[i/2].blue));
if (image->matte == MagickFalse)
SetPixelOpacity(q,ClampToQuantum(evaluate_pixel[i/2].opacity));
else
SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[i/2].opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
evaluate_pixel[i/2].index));
q++;
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EvaluateImages)
#endif
proceed=SetImageProgress(images,EvaluateImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
else
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register IndexPacket
*restrict evaluate_indexes;
register ssize_t
i,
x;
register MagickPixelPacket
*evaluate_pixel;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,
image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
evaluate_pixel[x]=zero;
next=images;
for (i=0; i < (ssize_t) number_images; i++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) next->columns; x++)
{
evaluate_pixel[x].red=ApplyEvaluateOperator(random_info[id],
GetPixelRed(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].red);
evaluate_pixel[x].green=ApplyEvaluateOperator(random_info[id],
GetPixelGreen(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].green);
evaluate_pixel[x].blue=ApplyEvaluateOperator(random_info[id],
GetPixelBlue(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].blue);
evaluate_pixel[x].opacity=ApplyEvaluateOperator(random_info[id],
GetPixelOpacity(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].opacity);
if (image->colorspace == CMYKColorspace)
evaluate_pixel[x].index=ApplyEvaluateOperator(random_info[id],
GetPixelIndex(indexes+x),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].index);
p++;
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
if (op == MeanEvaluateOperator)
for (x=0; x < (ssize_t) image->columns; x++)
{
evaluate_pixel[x].red/=number_images;
evaluate_pixel[x].green/=number_images;
evaluate_pixel[x].blue/=number_images;
evaluate_pixel[x].opacity/=number_images;
evaluate_pixel[x].index/=number_images;
}
if (op == MultiplyEvaluateOperator)
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) (number_images-1); j++)
{
evaluate_pixel[x].red*=(MagickRealType) QuantumScale;
evaluate_pixel[x].green*=(MagickRealType) QuantumScale;
evaluate_pixel[x].blue*=(MagickRealType) QuantumScale;
evaluate_pixel[x].opacity*=(MagickRealType) QuantumScale;
evaluate_pixel[x].index*=(MagickRealType) QuantumScale;
}
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(evaluate_pixel[x].red));
SetPixelGreen(q,ClampToQuantum(evaluate_pixel[x].green));
SetPixelBlue(q,ClampToQuantum(evaluate_pixel[x].blue));
if (image->matte == MagickFalse)
SetPixelOpacity(q,ClampToQuantum(evaluate_pixel[x].opacity));
else
SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[x].opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
evaluate_pixel[x].index));
q++;
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EvaluateImages)
#endif
proceed=SetImageProgress(images,EvaluateImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
evaluate_view=DestroyCacheView(evaluate_view);
evaluate_pixels=DestroyPixelThreadSet(evaluate_pixels);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
MagickExport MagickBooleanType EvaluateImageChannel(Image *image,
const ChannelType channel,const MagickEvaluateOperator op,const double value,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
return(MagickFalse);
}
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#endif
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(ApplyEvaluateOperator(random_info[id],
GetPixelRed(q),op,value)));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(ApplyEvaluateOperator(random_info[id],
GetPixelGreen(q),op,value)));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(ApplyEvaluateOperator(random_info[id],
GetPixelBlue(q),op,value)));
if ((channel & OpacityChannel) != 0)
{
if (image->matte == MagickFalse)
SetPixelOpacity(q,ClampToQuantum(ApplyEvaluateOperator(
random_info[id],GetPixelOpacity(q),op,value)));
else
SetPixelAlpha(q,ClampToQuantum(ApplyEvaluateOperator(
random_info[id],(Quantum) GetPixelAlpha(q),op,value)));
}
if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
SetPixelIndex(indexes+x,ClampToQuantum(ApplyEvaluateOperator(
random_info[id],GetPixelIndex(indexes+x),op,value)));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EvaluateImageChannel)
#endif
proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies an arithmetic, relational, or logical function,
% parameterized by one or more values, to an image. Use these operations to
% lighten or darken an image, to increase or decrease contrast in an image,
% or to produce the "negative" of an image.
%
% The format of the FunctionImageChannel method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
% MagickBooleanType FunctionImageChannel(Image *image,
% const ChannelType channel,const MagickFunction function,
% const ssize_t number_parameters,const double *argument,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o function: the function to apply.
%
% o parameters: one or more parameters that control the function.
%
% o exception: return any errors or warnings in this structure.
%
*/
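/*
Usage sketch (illustrative; assumes `image` and `exception` exist): apply
the linear polynomial 2*x - 0.5 (with x the channel value scaled to [0,1])
to every channel. Parameters are given highest order first:

double
parameters[2] = { 2.0, -0.5 };

(void) FunctionImageChannel(image,CompositeChannels,PolynomialFunction,2,
parameters,exception);
*/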
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
const size_t number_parameters,const double *parameters,
ExceptionInfo *exception)
{
MagickRealType
result;
register ssize_t
i;
(void) exception;
result=0.0;
switch (function)
{
case PolynomialFunction:
{
/*
* Polynomial
* Parameters: polynomial constants, highest to lowest order
* For example: c0*x^3 + c1*x^2 + c2*x + c3
*/
result=0.0;
for (i=0; i < (ssize_t) number_parameters; i++)
result=result*QuantumScale*pixel + parameters[i];
result*=QuantumRange;
break;
}
case SinusoidFunction:
{
/* Sinusoid Function
* Parameters: Freq, Phase, Ampl, bias
*/
double freq,phase,ampl,bias;
freq = ( number_parameters >= 1 ) ? parameters[0] : 1.0;
phase = ( number_parameters >= 2 ) ? parameters[1] : 0.0;
ampl = ( number_parameters >= 3 ) ? parameters[2] : 0.5;
bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5;
result=(MagickRealType) (QuantumRange*(ampl*sin((double) (2.0*MagickPI*
(freq*QuantumScale*pixel + phase/360.0) )) + bias ) );
break;
}
case ArcsinFunction:
{
/* Arcsin Function (pegged at range limits for invalid results)
* Parameters: Width, Center, Range, Bias
*/
double width,range,center,bias;
width = ( number_parameters >= 1 ) ? parameters[0] : 1.0;
center = ( number_parameters >= 2 ) ? parameters[1] : 0.5;
range = ( number_parameters >= 3 ) ? parameters[2] : 1.0;
bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5;
result = 2.0/width*(QuantumScale*pixel - center);
if ( result <= -1.0 )
result = bias - range/2.0;
else if ( result >= 1.0 )
result = bias + range/2.0;
else
result=(MagickRealType) (range/MagickPI*asin((double) result)+bias);
result *= QuantumRange;
break;
}
case ArctanFunction:
{
/* Arctan Function
* Parameters: Slope, Center, Range, Bias
*/
double slope,range,center,bias;
slope = ( number_parameters >= 1 ) ? parameters[0] : 1.0;
center = ( number_parameters >= 2 ) ? parameters[1] : 0.5;
range = ( number_parameters >= 3 ) ? parameters[2] : 1.0;
bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5;
result=(MagickRealType) (MagickPI*slope*(QuantumScale*pixel-center));
result=(MagickRealType) (QuantumRange*(range/MagickPI*atan((double)
result) + bias ) );
break;
}
case UndefinedFunction:
break;
}
return(ClampToQuantum(result));
}
MagickExport MagickBooleanType FunctionImage(Image *image,
const MagickFunction function,const size_t number_parameters,
const double *parameters,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=FunctionImageChannel(image,CompositeChannels,function,
number_parameters,parameters,exception);
return(status);
}
MagickExport MagickBooleanType FunctionImageChannel(Image *image,
const ChannelType channel,const MagickFunction function,
const size_t number_parameters,const double *parameters,
ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
return(MagickFalse);
}
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,ApplyFunction(GetPixelRed(q),function,
number_parameters,parameters,exception));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ApplyFunction(GetPixelGreen(q),function,
number_parameters,parameters,exception));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ApplyFunction(GetPixelBlue(q),function,
number_parameters,parameters,exception));
if ((channel & OpacityChannel) != 0)
{
if (image->matte == MagickFalse)
SetPixelOpacity(q,ApplyFunction(GetPixelOpacity(q),function,
number_parameters,parameters,exception));
else
SetPixelAlpha(q,ApplyFunction((Quantum) GetPixelAlpha(q),function,
number_parameters,parameters,exception));
}
if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
SetPixelIndex(indexes+x,ApplyFunction(GetPixelIndex(indexes+x),function,
number_parameters,parameters,exception));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FunctionImageChannel)
#endif
proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
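/*
  Illustrative sketch (not part of the original source; the helper name is
  hypothetical): applying the arctan sigmoid to all channels.  The
  parameter order (slope, center, range, bias) follows ApplyFunction()
  above, and the values shown reproduce its fallback defaults.
*/
static MagickBooleanType ExampleArctanFunction(Image *image,
  ExceptionInfo *exception)
{
  const double
    parameters[4] = { 1.0, 0.5, 1.0, 0.5 };  /* slope, center, range, bias */

  return(FunctionImageChannel(image,CompositeChannels,ArctanFunction,4,
    parameters,exception));
}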
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e C h a n n e l E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageChannelExtrema method is:
%
% MagickBooleanType GetImageChannelExtrema(const Image *image,
% const ChannelType channel,size_t *minima,size_t *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
return(GetImageChannelExtrema(image,CompositeChannels,minima,maxima,exception));
}
MagickExport MagickBooleanType GetImageChannelExtrema(const Image *image,
const ChannelType channel,size_t *minima,size_t *maxima,
ExceptionInfo *exception)
{
double
max,
min;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=GetImageChannelRange(image,channel,&min,&max,exception);
*minima=(size_t) ceil(min-0.5);
*maxima=(size_t) floor(max+0.5);
return(status);
}
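/*
  Illustrative sketch (not part of the original source; the helper name is
  hypothetical): querying the red-channel extrema.  The values come back
  rounded to integral quanta by the ceil/floor above.
*/
static void ExampleChannelExtrema(const Image *image,ExceptionInfo *exception)
{
  size_t
    maxima,
    minima;

  if (GetImageChannelExtrema(image,RedChannel,&minima,&maxima,
      exception) != MagickFalse)
    (void) printf("red extrema: [%.20g,%.20g]\n",(double) minima,
      (double) maxima);
}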
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMean() returns the mean and standard deviation of one or more
% image channels.
%
% The format of the GetImageChannelMean method is:
%
% MagickBooleanType GetImageChannelMean(const Image *image,
% const ChannelType channel,double *mean,double *standard_deviation,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
double *standard_deviation,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=GetImageChannelMean(image,CompositeChannels,mean,standard_deviation,
exception);
return(status);
}
MagickExport MagickBooleanType GetImageChannelMean(const Image *image,
const ChannelType channel,double *mean,double *standard_deviation,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
size_t
channels;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageChannelStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
channels=0;
channel_statistics[CompositeChannels].mean=0.0;
channel_statistics[CompositeChannels].standard_deviation=0.0;
if ((channel & RedChannel) != 0)
{
channel_statistics[CompositeChannels].mean+=
channel_statistics[RedChannel].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[RedChannel].variance-
channel_statistics[RedChannel].mean*
channel_statistics[RedChannel].mean;
channels++;
}
if ((channel & GreenChannel) != 0)
{
channel_statistics[CompositeChannels].mean+=
channel_statistics[GreenChannel].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[GreenChannel].variance-
channel_statistics[GreenChannel].mean*
channel_statistics[GreenChannel].mean;
channels++;
}
if ((channel & BlueChannel) != 0)
{
channel_statistics[CompositeChannels].mean+=
channel_statistics[BlueChannel].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[BlueChannel].variance-
channel_statistics[BlueChannel].mean*
channel_statistics[BlueChannel].mean;
channels++;
}
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
{
channel_statistics[CompositeChannels].mean+=
channel_statistics[OpacityChannel].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[OpacityChannel].variance-
channel_statistics[OpacityChannel].mean*
channel_statistics[OpacityChannel].mean;
channels++;
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_statistics[CompositeChannels].mean+=
channel_statistics[BlackChannel].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[BlackChannel].variance-
channel_statistics[BlackChannel].mean*
channel_statistics[BlackChannel].mean;
channels++;
}
channel_statistics[CompositeChannels].mean/=channels;
channel_statistics[CompositeChannels].standard_deviation=
sqrt(channel_statistics[CompositeChannels].standard_deviation/channels);
*mean=channel_statistics[CompositeChannels].mean;
*standard_deviation=channel_statistics[CompositeChannels].standard_deviation;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
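/*
  Illustrative sketch (not part of the original source; the helper name is
  hypothetical).  Note that the composite statistic pools the per-channel
  variances (variance - mean*mean, summed over channels) and only then
  takes the square root, so it is not the average of the per-channel
  standard deviations.
*/
static void ExampleChannelMean(const Image *image,ExceptionInfo *exception)
{
  double
    mean,
    standard_deviation;

  if (GetImageMean(image,&mean,&standard_deviation,exception) != MagickFalse)
    (void) printf("mean=%g standard-deviation=%g\n",mean,standard_deviation);
}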
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelKurtosis() returns the kurtosis and skewness of one or more
% image channels.
%
% The format of the GetImageChannelKurtosis method is:
%
% MagickBooleanType GetImageChannelKurtosis(const Image *image,
% const ChannelType channel,double *kurtosis,double *skewness,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
double *kurtosis,double *skewness,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=GetImageChannelKurtosis(image,CompositeChannels,kurtosis,skewness,
exception);
return(status);
}
MagickExport MagickBooleanType GetImageChannelKurtosis(const Image *image,
const ChannelType channel,double *kurtosis,double *skewness,
ExceptionInfo *exception)
{
double
area,
mean,
standard_deviation,
sum_squares,
sum_cubes,
sum_fourth_power;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
*kurtosis=0.0;
*skewness=0.0;
area=0.0;
mean=0.0;
standard_deviation=0.0;
sum_squares=0.0;
sum_cubes=0.0;
sum_fourth_power=0.0;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
{
mean+=GetPixelRed(p);
sum_squares+=(double) GetPixelRed(p)*GetPixelRed(p);
sum_cubes+=(double) GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
sum_fourth_power+=(double) GetPixelRed(p)*GetPixelRed(p)*
GetPixelRed(p)*GetPixelRed(p);
area++;
}
if ((channel & GreenChannel) != 0)
{
mean+=GetPixelGreen(p);
sum_squares+=(double) GetPixelGreen(p)*GetPixelGreen(p);
sum_cubes+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
GetPixelGreen(p);
sum_fourth_power+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
GetPixelGreen(p)*GetPixelGreen(p);
area++;
}
if ((channel & BlueChannel) != 0)
{
mean+=GetPixelBlue(p);
sum_squares+=(double) GetPixelBlue(p)*GetPixelBlue(p);
sum_cubes+=(double) GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
sum_fourth_power+=(double) GetPixelBlue(p)*GetPixelBlue(p)*
GetPixelBlue(p)*GetPixelBlue(p);
area++;
}
if ((channel & OpacityChannel) != 0)
{
mean+=GetPixelOpacity(p);
sum_squares+=(double) GetPixelOpacity(p)*GetPixelOpacity(p);
sum_cubes+=(double) GetPixelOpacity(p)*GetPixelOpacity(p)*
GetPixelOpacity(p);
sum_fourth_power+=(double) GetPixelOpacity(p)*GetPixelOpacity(p)*
GetPixelOpacity(p)*GetPixelOpacity(p);
area++;
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
mean+=GetPixelIndex(indexes+x);
sum_squares+=(double) GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x);
sum_cubes+=(double) GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x);
sum_fourth_power+=(double) GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x);
area++;
}
p++;
}
}
if (y < (ssize_t) image->rows)
return(MagickFalse);
if (area != 0.0)
{
mean/=area;
sum_squares/=area;
sum_cubes/=area;
sum_fourth_power/=area;
}
standard_deviation=sqrt(sum_squares-(mean*mean));
if (standard_deviation != 0.0)
{
*kurtosis=sum_fourth_power-4.0*mean*sum_cubes+6.0*mean*mean*sum_squares-
3.0*mean*mean*mean*mean;
*kurtosis/=standard_deviation*standard_deviation*standard_deviation*
standard_deviation;
*kurtosis-=3.0;
*skewness=sum_cubes-3.0*mean*sum_squares+2.0*mean*mean*mean;
*skewness/=standard_deviation*standard_deviation*standard_deviation;
}
return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
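/*
  Worked note (not part of the original source): with the raw moments
  m1=mean, m2=sum_squares, m3=sum_cubes and m4=sum_fourth_power (each
  divided by area), the code above applies the central-moment expansions

    sigma^2  = m2 - m1^2
    skewness = (m3 - 3*m1*m2 + 2*m1^3) / sigma^3
    kurtosis = (m4 - 4*m1*m3 + 6*m1^2*m2 - 3*m1^4) / sigma^4 - 3

  The trailing -3 makes this the excess kurtosis, so a Gaussian channel
  reports a kurtosis of 0.
*/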
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelRange() returns the range of one or more image channels.
%
% The format of the GetImageChannelRange method is:
%
% MagickBooleanType GetImageChannelRange(const Image *image,
% const ChannelType channel,double *minima,double *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,
double *minima,double *maxima,ExceptionInfo *exception)
{
return(GetImageChannelRange(image,CompositeChannels,minima,maxima,exception));
}
MagickExport MagickBooleanType GetImageChannelRange(const Image *image,
const ChannelType channel,double *minima,double *maxima,
ExceptionInfo *exception)
{
MagickPixelPacket
pixel;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
*maxima=(-MagickHuge);
*minima=MagickHuge;
GetMagickPixelPacket(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
if ((channel & RedChannel) != 0)
{
if (pixel.red < *minima)
*minima=(double) pixel.red;
if (pixel.red > *maxima)
*maxima=(double) pixel.red;
}
if ((channel & GreenChannel) != 0)
{
if (pixel.green < *minima)
*minima=(double) pixel.green;
if (pixel.green > *maxima)
*maxima=(double) pixel.green;
}
if ((channel & BlueChannel) != 0)
{
if (pixel.blue < *minima)
*minima=(double) pixel.blue;
if (pixel.blue > *maxima)
*maxima=(double) pixel.blue;
}
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
{
if (pixel.opacity < *minima)
*minima=(double) pixel.opacity;
if (pixel.opacity > *maxima)
*maxima=(double) pixel.opacity;
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
if ((double) GetPixelIndex(indexes+x) < *minima)
*minima=(double) GetPixelIndex(indexes+x);
if ((double) GetPixelIndex(indexes+x) > *maxima)
*maxima=(double) GetPixelIndex(indexes+x);
}
p++;
}
}
return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
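/*
  Illustrative sketch (not part of the original source; the helper name is
  hypothetical): the double-valued range is what GetImageChannelExtrema()
  rounds to integers, so prefer it when sub-quantum precision matters.
*/
static void ExampleChannelRange(const Image *image,ExceptionInfo *exception)
{
  double
    maxima,
    minima;

  if (GetImageChannelRange(image,CompositeChannels,&minima,&maxima,
      exception) != MagickFalse)
    (void) printf("range: [%g,%g]\n",minima,maxima);
}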
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelStatistics() returns statistics for each channel in the
% image. The statistics include the channel depth, its minima, maxima, mean,
% standard deviation, kurtosis and skewness. You can access the red channel
% mean, for example, like this:
%
% channel_statistics=GetImageChannelStatistics(image,exception);
% red_mean=channel_statistics[RedChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageChannelStatistics method is:
%
% ChannelStatistics *GetImageChannelStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelStatistics *GetImageChannelStatistics(const Image *image,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
double
area;
MagickStatusType
status;
QuantumAny
range;
register ssize_t
i;
size_t
channels,
depth,
length;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
length=CompositeChannels+1UL;
channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(length,
sizeof(*channel_statistics));
if (channel_statistics == (ChannelStatistics *) NULL)
return(channel_statistics);
(void) ResetMagickMemory(channel_statistics,0,length*
sizeof(*channel_statistics));
for (i=0; i <= (ssize_t) CompositeChannels; i++)
{
channel_statistics[i].depth=1;
channel_statistics[i].maxima=(-MagickHuge);
channel_statistics[i].minima=MagickHuge;
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; )
{
if (channel_statistics[RedChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[RedChannel].depth;
range=GetQuantumRange(depth);
status=GetPixelRed(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
GetPixelRed(p),range),range) ? MagickTrue : MagickFalse;
if (status != MagickFalse)
{
channel_statistics[RedChannel].depth++;
continue;
}
}
if (channel_statistics[GreenChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[GreenChannel].depth;
range=GetQuantumRange(depth);
status=GetPixelGreen(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
GetPixelGreen(p),range),range) ? MagickTrue : MagickFalse;
if (status != MagickFalse)
{
channel_statistics[GreenChannel].depth++;
continue;
}
}
if (channel_statistics[BlueChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[BlueChannel].depth;
range=GetQuantumRange(depth);
status=GetPixelBlue(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
GetPixelBlue(p),range),range) ? MagickTrue : MagickFalse;
if (status != MagickFalse)
{
channel_statistics[BlueChannel].depth++;
continue;
}
}
if (image->matte != MagickFalse)
{
if (channel_statistics[OpacityChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[OpacityChannel].depth;
range=GetQuantumRange(depth);
status=GetPixelOpacity(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
GetPixelOpacity(p),range),range) ? MagickTrue : MagickFalse;
if (status != MagickFalse)
{
channel_statistics[OpacityChannel].depth++;
continue;
}
}
}
if (image->colorspace == CMYKColorspace)
{
if (channel_statistics[BlackChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[BlackChannel].depth;
range=GetQuantumRange(depth);
status=GetPixelIndex(indexes+x) != ScaleAnyToQuantum(
ScaleQuantumToAny(GetPixelIndex(indexes+x),range),range) ?
MagickTrue : MagickFalse;
if (status != MagickFalse)
{
channel_statistics[BlackChannel].depth++;
continue;
}
}
}
if ((double) GetPixelRed(p) < channel_statistics[RedChannel].minima)
channel_statistics[RedChannel].minima=(double) GetPixelRed(p);
if ((double) GetPixelRed(p) > channel_statistics[RedChannel].maxima)
channel_statistics[RedChannel].maxima=(double) GetPixelRed(p);
channel_statistics[RedChannel].sum+=GetPixelRed(p);
channel_statistics[RedChannel].sum_squared+=(double) GetPixelRed(p)*
GetPixelRed(p);
channel_statistics[RedChannel].sum_cubed+=(double)
GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
channel_statistics[RedChannel].sum_fourth_power+=(double)
GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
if ((double) GetPixelGreen(p) < channel_statistics[GreenChannel].minima)
channel_statistics[GreenChannel].minima=(double) GetPixelGreen(p);
if ((double) GetPixelGreen(p) > channel_statistics[GreenChannel].maxima)
channel_statistics[GreenChannel].maxima=(double) GetPixelGreen(p);
channel_statistics[GreenChannel].sum+=GetPixelGreen(p);
channel_statistics[GreenChannel].sum_squared+=(double) GetPixelGreen(p)*
GetPixelGreen(p);
channel_statistics[GreenChannel].sum_cubed+=(double) GetPixelGreen(p)*
GetPixelGreen(p)*GetPixelGreen(p);
channel_statistics[GreenChannel].sum_fourth_power+=(double)
GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p);
if ((double) GetPixelBlue(p) < channel_statistics[BlueChannel].minima)
channel_statistics[BlueChannel].minima=(double) GetPixelBlue(p);
if ((double) GetPixelBlue(p) > channel_statistics[BlueChannel].maxima)
channel_statistics[BlueChannel].maxima=(double) GetPixelBlue(p);
channel_statistics[BlueChannel].sum+=GetPixelBlue(p);
channel_statistics[BlueChannel].sum_squared+=(double) GetPixelBlue(p)*
GetPixelBlue(p);
channel_statistics[BlueChannel].sum_cubed+=(double) GetPixelBlue(p)*
GetPixelBlue(p)*GetPixelBlue(p);
channel_statistics[BlueChannel].sum_fourth_power+=(double)
GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
if (image->matte != MagickFalse)
{
if ((double) GetPixelOpacity(p) < channel_statistics[OpacityChannel].minima)
channel_statistics[OpacityChannel].minima=(double)
GetPixelOpacity(p);
if ((double) GetPixelOpacity(p) > channel_statistics[OpacityChannel].maxima)
channel_statistics[OpacityChannel].maxima=(double)
GetPixelOpacity(p);
channel_statistics[OpacityChannel].sum+=GetPixelOpacity(p);
channel_statistics[OpacityChannel].sum_squared+=(double)
GetPixelOpacity(p)*GetPixelOpacity(p);
channel_statistics[OpacityChannel].sum_cubed+=(double)
GetPixelOpacity(p)*GetPixelOpacity(p)*GetPixelOpacity(p);
channel_statistics[OpacityChannel].sum_fourth_power+=(double)
GetPixelOpacity(p)*GetPixelOpacity(p)*GetPixelOpacity(p)*
GetPixelOpacity(p);
}
if (image->colorspace == CMYKColorspace)
{
if ((double) GetPixelIndex(indexes+x) < channel_statistics[BlackChannel].minima)
channel_statistics[BlackChannel].minima=(double)
GetPixelIndex(indexes+x);
if ((double) GetPixelIndex(indexes+x) > channel_statistics[BlackChannel].maxima)
channel_statistics[BlackChannel].maxima=(double)
GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum+=GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_squared+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_cubed+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_fourth_power+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
}
x++;
p++;
}
}
area=(double) image->columns*image->rows;
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
channel_statistics[i].sum/=area;
channel_statistics[i].sum_squared/=area;
channel_statistics[i].sum_cubed/=area;
channel_statistics[i].sum_fourth_power/=area;
channel_statistics[i].mean=channel_statistics[i].sum;
channel_statistics[i].variance=channel_statistics[i].sum_squared;
channel_statistics[i].standard_deviation=sqrt(
channel_statistics[i].variance-(channel_statistics[i].mean*
channel_statistics[i].mean));
}
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
channel_statistics[CompositeChannels].depth=(size_t) EvaluateMax((double)
channel_statistics[CompositeChannels].depth,(double)
channel_statistics[i].depth);
channel_statistics[CompositeChannels].minima=MagickMin(
channel_statistics[CompositeChannels].minima,
channel_statistics[i].minima);
channel_statistics[CompositeChannels].maxima=EvaluateMax(
channel_statistics[CompositeChannels].maxima,
channel_statistics[i].maxima);
channel_statistics[CompositeChannels].sum+=channel_statistics[i].sum;
channel_statistics[CompositeChannels].sum_squared+=
channel_statistics[i].sum_squared;
channel_statistics[CompositeChannels].sum_cubed+=
channel_statistics[i].sum_cubed;
channel_statistics[CompositeChannels].sum_fourth_power+=
channel_statistics[i].sum_fourth_power;
channel_statistics[CompositeChannels].mean+=channel_statistics[i].mean;
channel_statistics[CompositeChannels].variance+=
channel_statistics[i].variance-channel_statistics[i].mean*
channel_statistics[i].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[i].variance-channel_statistics[i].mean*
channel_statistics[i].mean;
}
channels=3;
if (image->matte != MagickFalse)
channels++;
if (image->colorspace == CMYKColorspace)
channels++;
channel_statistics[CompositeChannels].sum/=channels;
channel_statistics[CompositeChannels].sum_squared/=channels;
channel_statistics[CompositeChannels].sum_cubed/=channels;
channel_statistics[CompositeChannels].sum_fourth_power/=channels;
channel_statistics[CompositeChannels].mean/=channels;
channel_statistics[CompositeChannels].variance/=channels;
channel_statistics[CompositeChannels].standard_deviation=
sqrt(channel_statistics[CompositeChannels].standard_deviation/channels);
channel_statistics[CompositeChannels].kurtosis/=channels;
channel_statistics[CompositeChannels].skewness/=channels;
for (i=0; i <= (ssize_t) CompositeChannels; i++)
{
if (channel_statistics[i].standard_deviation == 0.0)
continue;
channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-
3.0*channel_statistics[i].mean*channel_statistics[i].sum_squared+
2.0*channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].mean)/(channel_statistics[i].standard_deviation*
channel_statistics[i].standard_deviation*
channel_statistics[i].standard_deviation);
channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-
4.0*channel_statistics[i].mean*channel_statistics[i].sum_cubed+
6.0*channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
channel_statistics[i].mean*1.0*channel_statistics[i].mean*
channel_statistics[i].mean)/(channel_statistics[i].standard_deviation*
channel_statistics[i].standard_deviation*
channel_statistics[i].standard_deviation*
channel_statistics[i].standard_deviation)-3.0;
}
if (y < (ssize_t) image->rows)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
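/*
  Illustrative sketch (not part of the original source; the helper name is
  hypothetical): reading a few fields from the statistics buffer and
  releasing it afterwards, as the docblock above prescribes.
*/
static void ExampleChannelStatistics(const Image *image,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  channel_statistics=GetImageChannelStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return;
  (void) printf("red: mean=%g stddev=%g kurtosis=%g skewness=%g\n",
    channel_statistics[RedChannel].mean,
    channel_statistics[RedChannel].standard_deviation,
    channel_statistics[RedChannel].kurtosis,
    channel_statistics[RedChannel].skewness);
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
}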
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
% Image *PolynomialImageChannel(const Image *images,
% const size_t number_terms,const ChannelType channel,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o channel: the channel.
%
% o number_terms: the number of terms in the list; the list holds
% 2 x number_terms entries (a coefficient and degree pair per term).
%
% o terms: the list of polynomial coefficient and degree pairs.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolynomialImage(const Image *images,
const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
Image
*polynomial_image;
polynomial_image=PolynomialImageChannel(images,DefaultChannels,number_terms,
terms,exception);
return(polynomial_image);
}
MagickExport Image *PolynomialImageChannel(const Image *images,
const ChannelType channel,const size_t number_terms,const double *terms,
ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"
CacheView
*polynomial_view;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
**restrict polynomial_pixels,
zero;
size_t
number_images;
ssize_t
y;
assert(images != (Image *) NULL);
assert(images->signature == MagickSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=CloneImage(images,images->columns,images->rows,MagickTrue,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
image=DestroyImage(image);
return((Image *) NULL);
}
number_images=GetImageListLength(images);
polynomial_pixels=AcquirePixelThreadSet(images,number_images);
if (polynomial_pixels == (MagickPixelPacket **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Polynomial image pixels.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(images,&zero);
polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register IndexPacket
*restrict polynomial_indexes;
register MagickPixelPacket
*polynomial_pixel;
register PixelPacket
*restrict q;
register ssize_t
i,
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
polynomial_indexes=GetCacheViewAuthenticIndexQueue(polynomial_view);
polynomial_pixel=polynomial_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
polynomial_pixel[x]=zero;
next=images;
for (i=0; i < (ssize_t) number_images; i++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
if (i >= (ssize_t) number_terms)
break;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
coefficient,
degree;
coefficient=terms[i << 1];
degree=terms[(i << 1)+1];
polynomial_pixel[x].red+=coefficient*pow(QuantumScale*p->red,degree);
polynomial_pixel[x].green+=coefficient*pow(QuantumScale*p->green,
degree);
polynomial_pixel[x].blue+=coefficient*pow(QuantumScale*p->blue,degree);
polynomial_pixel[x].opacity+=coefficient*pow(QuantumScale*
(QuantumRange-p->opacity),degree);
if (image->colorspace == CMYKColorspace)
polynomial_pixel[x].index+=coefficient*pow(QuantumScale*indexes[x],
degree);
p++;
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].red));
SetPixelGreen(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].green));
SetPixelBlue(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].blue));
if (image->matte == MagickFalse)
SetPixelOpacity(q,ClampToQuantum(QuantumRange-QuantumRange*
polynomial_pixel[x].opacity));
else
SetPixelAlpha(q,ClampToQuantum(QuantumRange-QuantumRange*
polynomial_pixel[x].opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(polynomial_indexes+x,ClampToQuantum(QuantumRange*
polynomial_pixel[x].index));
q++;
}
if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_PolynomialImages)
#endif
proceed=SetImageProgress(images,PolynomialImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
polynomial_view=DestroyCacheView(polynomial_view);
polynomial_pixels=DestroyPixelThreadSet(polynomial_pixels);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
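/*
  Illustrative sketch (not part of the original source; the helper name is
  hypothetical): averaging the first two images of a sequence.  Each term
  is a (coefficient,degree) pair consumed per image, so {0.5,1.0,0.5,1.0}
  evaluates 0.5*A^1 + 0.5*B^1.
*/
static Image *ExampleAverageImages(const Image *images,
  ExceptionInfo *exception)
{
  const double
    terms[4] = { 0.5, 1.0, 0.5, 1.0 };

  return(PolynomialImage(images,2,terms,exception));
}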
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
% Image *StatisticImageChannel(const Image *image,
% const ChannelType channel,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
#define ListChannels 5
typedef struct _ListNode
{
size_t
next[9],
count,
signature;
} ListNode;
typedef struct _SkipList
{
ssize_t
level;
ListNode
*nodes;
} SkipList;
typedef struct _PixelList
{
size_t
length,
seed,
signature;
SkipList
lists[ListChannels];
} PixelList;
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
register ssize_t
i;
if (pixel_list == (PixelList *) NULL)
return((PixelList *) NULL);
for (i=0; i < ListChannels; i++)
if (pixel_list->lists[i].nodes != (ListNode *) NULL)
pixel_list->lists[i].nodes=(ListNode *) RelinquishMagickMemory(
pixel_list->lists[i].nodes);
pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list);
return(pixel_list);
}
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
register ssize_t
i;
assert(pixel_list != (PixelList **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixel_list[i] != (PixelList *) NULL)
pixel_list[i]=DestroyPixelList(pixel_list[i]);
pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list);
return(pixel_list);
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
PixelList
*pixel_list;
register ssize_t
i;
pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
if (pixel_list == (PixelList *) NULL)
return(pixel_list);
(void) ResetMagickMemory((void *) pixel_list,0,sizeof(*pixel_list));
pixel_list->length=width*height;
for (i=0; i < ListChannels; i++)
{
pixel_list->lists[i].nodes=(ListNode *) AcquireQuantumMemory(65537UL,
sizeof(*pixel_list->lists[i].nodes));
if (pixel_list->lists[i].nodes == (ListNode *) NULL)
return(DestroyPixelList(pixel_list));
(void) ResetMagickMemory(pixel_list->lists[i].nodes,0,65537UL*
sizeof(*pixel_list->lists[i].nodes));
}
pixel_list->signature=MagickSignature;
return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
const size_t height)
{
PixelList
**pixel_list;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
sizeof(*pixel_list));
if (pixel_list == (PixelList **) NULL)
return((PixelList **) NULL);
(void) ResetMagickMemory(pixel_list,0,number_threads*sizeof(*pixel_list));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_list[i]=AcquirePixelList(width,height);
if (pixel_list[i] == (PixelList *) NULL)
return(DestroyPixelListThreadSet(pixel_list));
}
return(pixel_list);
}
static void AddNodePixelList(PixelList *pixel_list,const ssize_t channel,
const size_t color)
{
register SkipList
*list;
register ssize_t
level;
size_t
search,
update[9];
/*
Initialize the node.
*/
list=pixel_list->lists+channel;
list->nodes[color].signature=pixel_list->signature;
list->nodes[color].count=1;
/*
Determine where it belongs in the list.
*/
search=65536UL;
for (level=list->level; level >= 0; level--)
{
while (list->nodes[search].next[level] < color)
search=list->nodes[search].next[level];
update[level]=search;
}
/*
Generate a pseudo-random level for this node.
*/
for (level=0; ; level++)
{
pixel_list->seed=(pixel_list->seed*42893621L)+1L;
if ((pixel_list->seed & 0x300) != 0x300)
break;
}
if (level > 8)
level=8;
if (level > (list->level+2))
level=list->level+2;
/*
If we're raising the list's level, link back to the root node.
*/
while (level > list->level)
{
list->level++;
update[list->level]=65536UL;
}
/*
Link the node into the skip-list.
*/
do
{
list->nodes[color].next[level]=list->nodes[update[level]].next[level];
list->nodes[update[level]].next[level]=color;
} while (level-- > 0);
}
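/*
  Worked note (not part of the original source): the level generator above
  keeps promoting a node while bits 0x300 of the linear-congruential seed
  are both set, an event of probability ~1/4 per draw, so node heights are
  geometric with the classic skip-list p=1/4 fanout.  Heights are clamped
  to 8 (next[9] covers levels 0..8) and to at most two above the list's
  current level.  Color index 65536 is the sentinel/root node, which is
  why AcquirePixelList() allocates 65537 nodes per channel.
*/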
static void GetMaximumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
register SkipList
*list;
register ssize_t
channel;
size_t
color,
maximum;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the maximum value for each color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
count=0;
maximum=list->nodes[color].next[0];
do
{
color=list->nodes[color].next[0];
if (color > maximum)
maximum=color;
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
channels[channel]=(unsigned short) maximum;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetMeanPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
MagickRealType
sum;
register SkipList
*list;
register ssize_t
channel;
size_t
color;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the mean value for each color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
count=0;
sum=0.0;
do
{
color=list->nodes[color].next[0];
sum+=(MagickRealType) list->nodes[color].count*color;
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
sum/=pixel_list->length;
channels[channel]=(unsigned short) sum;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetMedianPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
register SkipList
*list;
register ssize_t
channel;
size_t
color;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the median value for each color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
count=0;
do
{
color=list->nodes[color].next[0];
count+=list->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
channels[channel]=(unsigned short) color;
}
GetMagickPixelPacket((const Image *) NULL,pixel);
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetMinimumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
register SkipList
*list;
register ssize_t
channel;
size_t
color,
minimum;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the minimum value for each color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
count=0;
color=65536UL;
minimum=list->nodes[color].next[0];
do
{
color=list->nodes[color].next[0];
if (color < minimum)
minimum=color;
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
channels[channel]=(unsigned short) minimum;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetModePixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
register SkipList
*list;
register ssize_t
channel;
size_t
color,
max_count,
mode;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Make each pixel the 'predominant color' of the specified neighborhood.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
mode=color;
max_count=list->nodes[mode].count;
count=0;
do
{
color=list->nodes[color].next[0];
if (list->nodes[color].count > max_count)
{
mode=color;
max_count=list->nodes[mode].count;
}
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
channels[channel]=(unsigned short) mode;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetNonpeakPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
register SkipList
*list;
register ssize_t
channel;
size_t
color,
next,
previous;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the non-peak value for each color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
next=list->nodes[color].next[0];
count=0;
do
{
previous=color;
color=next;
next=list->nodes[color].next[0];
count+=list->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
if ((previous == 65536UL) && (next != 65536UL))
color=next;
else
if ((previous != 65536UL) && (next == 65536UL))
color=previous;
channels[channel]=(unsigned short) color;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetStandardDeviationPixelList(PixelList *pixel_list,
MagickPixelPacket *pixel)
{
MagickRealType
sum,
sum_squared;
register SkipList
*list;
register ssize_t
channel;
size_t
color;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the standard deviation for each color channel.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
count=0;
sum=0.0;
sum_squared=0.0;
do
{
register ssize_t
i;
color=list->nodes[color].next[0];
sum+=(MagickRealType) list->nodes[color].count*color;
for (i=0; i < (ssize_t) list->nodes[color].count; i++)
sum_squared+=((MagickRealType) color)*((MagickRealType) color);
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
sum/=pixel_list->length;
sum_squared/=pixel_list->length;
channels[channel]=(unsigned short) sqrt(sum_squared-(sum*sum));
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static inline void InsertPixelList(const Image *image,const PixelPacket *pixel,
const IndexPacket *indexes,PixelList *pixel_list)
{
size_t
signature;
unsigned short
index;
index=ScaleQuantumToShort(GetPixelRed(pixel));
signature=pixel_list->lists[0].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[0].nodes[index].count++;
else
AddNodePixelList(pixel_list,0,index);
index=ScaleQuantumToShort(GetPixelGreen(pixel));
signature=pixel_list->lists[1].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[1].nodes[index].count++;
else
AddNodePixelList(pixel_list,1,index);
index=ScaleQuantumToShort(GetPixelBlue(pixel));
signature=pixel_list->lists[2].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[2].nodes[index].count++;
else
AddNodePixelList(pixel_list,2,index);
index=ScaleQuantumToShort(GetPixelOpacity(pixel));
signature=pixel_list->lists[3].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[3].nodes[index].count++;
else
AddNodePixelList(pixel_list,3,index);
/*
  List 4 must always receive an entry so that the per-channel counts stay
  in sync with pixel_list->length; when the image is not CMYK the previous
  (opacity) index is simply reused.
*/
if (image->colorspace == CMYKColorspace)
index=ScaleQuantumToShort(GetPixelIndex(indexes));
signature=pixel_list->lists[4].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[4].nodes[index].count++;
else
AddNodePixelList(pixel_list,4,index);
}
static inline MagickRealType MagickAbsoluteValue(const MagickRealType x)
{
if (x < 0)
return(-x);
return(x);
}
static void ResetPixelList(PixelList *pixel_list)
{
int
level;
register ListNode
*root;
register SkipList
*list;
register ssize_t
channel;
/*
Reset the skip-list.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
root=list->nodes+65536UL;
list->level=0;
for (level=0; level < 9; level++)
root->next[level]=65536UL;
}
pixel_list->seed=pixel_list->signature++;
}
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
const size_t width,const size_t height,ExceptionInfo *exception)
{
Image
*statistic_image;
statistic_image=StatisticImageChannel(image,DefaultChannels,type,width,
height,exception);
return(statistic_image);
}
MagickExport Image *StatisticImageChannel(const Image *image,
const ChannelType channel,const StatisticType type,const size_t width,
const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"
CacheView
*image_view,
*statistic_view;
Image
*statistic_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelList
**restrict pixel_list;
size_t
neighbor_height,
neighbor_width;
ssize_t
y;
/*
Initialize statistics image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
statistic_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (statistic_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(statistic_image,DirectClass) == MagickFalse)
{
InheritException(exception,&statistic_image->exception);
statistic_image=DestroyImage(statistic_image);
return((Image *) NULL);
}
neighbor_width=width == 0 ? GetOptimalKernelWidth2D((double) width,0.5) :
width;
neighbor_height=height == 0 ? GetOptimalKernelWidth2D((double) height,0.5) :
height;
pixel_list=AcquirePixelListThreadSet(neighbor_width,neighbor_height);
if (pixel_list == (PixelList **) NULL)
{
statistic_image=DestroyImage(statistic_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Make each pixel the min / max / median / mode / etc. of the neighborhood.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,statistic_image,statistic_image->rows,1)
#endif
for (y=0; y < (ssize_t) statistic_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict statistic_indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) neighbor_width/2L),y-
(ssize_t) (neighbor_height/2L),image->columns+neighbor_width,
neighbor_height,exception);
q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
statistic_indexes=GetCacheViewAuthenticIndexQueue(statistic_view);
for (x=0; x < (ssize_t) statistic_image->columns; x++)
{
MagickPixelPacket
pixel;
register const IndexPacket
*restrict s;
register const PixelPacket
*restrict r;
register ssize_t
u,
v;
r=p;
s=indexes+x;
ResetPixelList(pixel_list[id]);
for (v=0; v < (ssize_t) neighbor_height; v++)
{
for (u=0; u < (ssize_t) neighbor_width; u++)
InsertPixelList(image,r+u,s+u,pixel_list[id]);
r+=image->columns+neighbor_width;
s+=image->columns+neighbor_width;
}
GetMagickPixelPacket(image,&pixel);
SetMagickPixelPacket(image,p+neighbor_width*neighbor_height/2,indexes+x+
neighbor_width*neighbor_height/2,&pixel);
switch (type)
{
case GradientStatistic:
{
MagickPixelPacket
maximum,
minimum;
GetMinimumPixelList(pixel_list[id],&pixel);
minimum=pixel;
GetMaximumPixelList(pixel_list[id],&pixel);
maximum=pixel;
pixel.red=MagickAbsoluteValue(maximum.red-minimum.red);
pixel.green=MagickAbsoluteValue(maximum.green-minimum.green);
pixel.blue=MagickAbsoluteValue(maximum.blue-minimum.blue);
pixel.opacity=MagickAbsoluteValue(maximum.opacity-minimum.opacity);
if (image->colorspace == CMYKColorspace)
pixel.index=MagickAbsoluteValue(maximum.index-minimum.index);
break;
}
case MaximumStatistic:
{
GetMaximumPixelList(pixel_list[id],&pixel);
break;
}
case MeanStatistic:
{
GetMeanPixelList(pixel_list[id],&pixel);
break;
}
case MedianStatistic:
default:
{
GetMedianPixelList(pixel_list[id],&pixel);
break;
}
case MinimumStatistic:
{
GetMinimumPixelList(pixel_list[id],&pixel);
break;
}
case ModeStatistic:
{
GetModePixelList(pixel_list[id],&pixel);
break;
}
case NonpeakStatistic:
{
GetNonpeakPixelList(pixel_list[id],&pixel);
break;
}
case StandardDeviationStatistic:
{
GetStandardDeviationPixelList(pixel_list[id],&pixel);
break;
}
}
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(pixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(pixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(pixel.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(statistic_indexes+x,ClampToQuantum(pixel.index));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_StatisticImage)
#endif
proceed=SetImageProgress(image,StatisticImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
statistic_view=DestroyCacheView(statistic_view);
image_view=DestroyCacheView(image_view);
pixel_list=DestroyPixelListThreadSet(pixel_list);
if (status == MagickFalse)
statistic_image=DestroyImage(statistic_image);
return(statistic_image);
}
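/*
  Illustrative sketch (not part of the original source; the helper name is
  hypothetical): a 3x3 median filter expressed through the statistic API.
*/
static Image *ExampleMedianFilter(const Image *image,ExceptionInfo *exception)
{
  return(StatisticImage(image,MedianStatistic,3,3,exception));
}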
|
utils.c | #include <cdnn/utils.h>
float cache;
int return_cache;
int nn_threads;
/**
* Creates a matrix filled with zeros.
* @param dims An array of matrix dimensions (int)[rows,columns]
* @return A pointer to the created matrix.
*/
dARRAY * zeros(int * dims){
dARRAY * matrix = (dARRAY*)malloc(sizeof(dARRAY));
matrix->matrix = (float*)calloc(dims[0]*dims[1],sizeof(float));
matrix->shape[0] = dims[0];
matrix->shape[1] = dims[1];
return matrix;
}
/**
* Creates a matrix filled with ones.
* @param dims An array of matrix dimensions (int)[rows,columns]
* @return A pointer to the created matrix.
*/
dARRAY * ones(int * dims){
dARRAY * matrix = (dARRAY*)malloc(sizeof(dARRAY));
matrix->matrix = (float*)malloc(sizeof(float)*(dims[0]*dims[1]));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads)
for(int i=0;i<dims[0]*dims[1];i++){
matrix->matrix[i]=1;
}
matrix->shape[0] = dims[0];
matrix->shape[1] = dims[1];
return matrix;
}
/**
* Creates an identity matrix.
* @param dims An array of matrix dimensions (int)[rows,columns]
* @return A pointer to the identity matrix.
*/
dARRAY * eye(int * dims){
dARRAY * matrix = (dARRAY*)malloc(sizeof(dARRAY));
matrix->matrix = (float*)calloc((dims[0]*dims[1]),sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) collapse(1)
for(int i=0;i<dims[0]; i++){
for(int j=0;j<dims[1];j++)
matrix->matrix[i*dims[1]+j] = i==j ? 1: 0;
}
matrix->shape[0] = dims[0];
matrix->shape[1] = dims[1];
return matrix;
}
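/*
  Illustrative sketch (not part of this file; the helper name is
  hypothetical): allocating the three basic matrices.  free2d() is the
  deallocator used elsewhere in this library, and nn_threads is assumed to
  have been set by the caller.
*/
static void example_constructors(void){
  int dims[2] = {3,3};
  dARRAY * z = zeros(dims); //3x3, all 0.0f
  dARRAY * o = ones(dims);  //3x3, all 1.0f
  dARRAY * I = eye(dims);   //3x3 identity
  free2d(z);
  free2d(o);
  free2d(I);
}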
/**
* Finds the transpose of the given matrix.
* @param Matrix The input Matrix of dARRAY Object
* @return A pointer to the result of transpose(Matrix)
*/
dARRAY * transpose(dARRAY * restrict Matrix){
if(Matrix==NULL){
printf("\033[1;31mError:\033[93m Matrix is empty. Call transpose() only after intializing dARRAY object.\033[0m\n");
exit(EXIT_FAILURE);
}
if(Matrix->shape[0]==1 && Matrix->shape[1]==1) return Matrix;
dARRAY * matrix = (dARRAY*)malloc(sizeof(dARRAY));
matrix->matrix = (float*)calloc(Matrix->shape[0]*Matrix->shape[1],sizeof(float));
//NOTE: no taskwait follows; this relies on the task running to completion
//on the calling thread (the usual behaviour outside a parallel region).
//Inside a parallel region the deferred task could race with the return.
#pragma omp task
cblas_somatcopy(CblasRowMajor,CblasTrans,Matrix->shape[0],Matrix->shape[1],1,Matrix->matrix,Matrix->shape[1],matrix->matrix,Matrix->shape[0]);
matrix->shape[0] = Matrix->shape[1];
matrix->shape[1] = Matrix->shape[0];
return matrix;
}
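/*
  Illustrative sketch (not part of this file; the helper name is
  hypothetical): transposing a 2x3 matrix.  Note that transpose() returns
  the input pointer unchanged for 1x1 matrices, so a 1x1 "result" must not
  be freed separately from its input.
*/
static void example_transpose(void){
  int dims[2] = {2,3};
  dARRAY * A = ones(dims);
  dARRAY * AT = transpose(A); //shape becomes (3,2)
  free2d(A);
  free2d(AT);
}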
/**
* Finds the transpose of the given matrix (legacy implementation, kept for reference: fast transpose without CBLAS).
* @param Matrix The input Matrix of dARRAY Object
* @return A pointer to the result of transpose_my(Matrix)
*/
dARRAY * transpose_my(dARRAY * restrict Matrix){
if(Matrix==NULL){
printf("\033[1;31mError:\033[93m Matrix is empty. Call transpose() only after intializing dARRAY object.\033[0m\n");
return NULL;
}
if(Matrix->shape[0]==1 && Matrix->shape[1]==1) return Matrix;
dARRAY * matrix = (dARRAY*)malloc(sizeof(dARRAY));
matrix->matrix = (float*)calloc(Matrix->shape[0]*Matrix->shape[1],sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(Matrix,matrix) schedule(static)
for(int i=0;i<Matrix->shape[0];i++)
for(int j=0;j<Matrix->shape[1];j++)
matrix->matrix[j*Matrix->shape[0]+i] = Matrix->matrix[i*Matrix->shape[1]+j];
matrix->shape[0] = Matrix->shape[1];
matrix->shape[1] = Matrix->shape[0];
return matrix;
}
/**
* Finds the dot product (Matrix Multiplication) of two matrices.
* @param MatrixA First Matrix
* @param MatrixB Second Matrix
* @return A pointer to the result of dot(MatrixA,MatrixB)
*/
dARRAY * dot(dARRAY * MatrixA, dARRAY * MatrixB){
if(MatrixA == NULL){
printf("\033[1;31mError:\033[93m MatrixA is empty. Call dot() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
if(MatrixB == NULL){
printf("\033[1;31mError:\033[93m MatrixB is empty. Call dot() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
//NULL checks must precede the shape check, which dereferences both matrices.
if(MatrixA->shape[1]!=MatrixB->shape[0]){
printf("\033[1;31mError:\033[93m Shape error while performing dot(). Matrix dimensions do not align. %d(dim1) != %d(dim0)\033[0m\n",MatrixA->shape[1],MatrixB->shape[0]);
exit(EXIT_FAILURE);
}
long long int m,n,k;
m = MatrixA->shape[0];
n = MatrixB->shape[1];
k = MatrixB->shape[0];
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(m*n,sizeof(float));
//NOTE: as in transpose(), no taskwait follows before the result is returned.
#pragma omp task
cblas_sgemm(CblasRowMajor,\
CblasNoTrans,\
CblasNoTrans,\
m,n,k,\
1,\
MatrixA->matrix,\
k,\
MatrixB->matrix,\
n,\
0,\
result->matrix,\
n);
result->shape[0] = MatrixA->shape[0];
result->shape[1] = MatrixB->shape[1];
return result;
}
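/*
  Illustrative sketch (not part of this file; the helper name is
  hypothetical): (2x3).(3x4) -> (2x4).  dot() terminates the process on a
  shape mismatch, so validate shapes beforehand when that is unacceptable.
*/
static void example_dot(void){
  int a_dims[2] = {2,3};
  int b_dims[2] = {3,4};
  dARRAY * A = ones(a_dims);
  dARRAY * B = ones(b_dims);
  dARRAY * C = dot(A,B); //every element equals 3.0f
  free2d(A);
  free2d(B);
  free2d(C);
}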
/**
* Finds the dot product (Matrix Multiplication) of two matrices (legacy implementation, kept for reference: fast matrix multiplication without CBLAS).
* @param MatrixA First Matrix
* @param MatrixB Second Matrix
* @return A pointer to the result of dot_my(MatrixA,MatrixB)
*/
dARRAY * dot_my(dARRAY * MatrixA, dARRAY * MatrixB){
if(MatrixB == NULL || MatrixA == NULL){
printf("\033[1;31mError:\033[93m One of the input matrices is empty. Call dot_my() only after initializing dARRAY object\033[0m\n");
return NULL;
}
//NULL checks must precede the shape check, which dereferences both matrices.
if(MatrixA->shape[1]!=MatrixB->shape[0]){
printf("\033[1;31mError:\033[93m Shape error while performing dot_my(). Matrix dimensions do not align. %d(dim1) != %d(dim0)\033[0m\n",MatrixA->shape[1],MatrixB->shape[0]);
return NULL;
}
dARRAY * BT = NULL;
dARRAY * result = NULL;
result = (dARRAY *)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(MatrixA->shape[0]*MatrixB->shape[1],sizeof(float));
BT = transpose(MatrixB);
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) collapse(1) schedule(static)
for(int i=0;i<MatrixA->shape[0];i++){
for(int j=0;j<MatrixB->shape[1];j++){
for(int k=0;k<MatrixB->shape[0];k++){
result->matrix[i * MatrixB->shape[1]+j] += MatrixA->matrix[i*MatrixA->shape[1]+k] * BT->matrix[j*MatrixB->shape[0]+k];
}
}
}
free2d(BT);
BT = NULL;
result->shape[0] = MatrixA->shape[0];
result->shape[1] = MatrixB->shape[1];
return result;
}
/**
* Function performs element-wise multiplication on two matrices.
* @param MatrixA First Matrix
* @param MatrixB Second Matrix
* @return A pointer to the result of multiply(MatrixA,MatrixB)
*/
dARRAY * multiply(dARRAY * restrict MatrixA, dARRAY * restrict MatrixB){
if(MatrixA == NULL){
printf("\033[1;31mError:\033[93m MatrixA is empty. Call multiply() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
if(MatrixB == NULL){
printf("\033[1;31mError:\033[93m MatrixB is empty. Call multiply() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * temp = NULL;
int x = 0, y = 0;
#pragma omp sections nowait
{
#pragma omp section
x = size(MatrixA);
#pragma omp section
y = size(MatrixB);
}
int flag = 0;
if(x>y){
temp = b_cast(MatrixA,MatrixB);
flag=1;
}
else if(x<y){
temp = b_cast(MatrixB,MatrixA);
flag=1;
}
if(temp==NULL && flag){
printf("\033[1;31mError:\033[93m Could not perform multiply(). Please check shape of input matrices.\033[0m\n");
return NULL;
}
//The result buffer takes MatrixA's shape; after broadcasting, both operands are assumed to share it.
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(MatrixA->shape[0]*MatrixA->shape[1],sizeof(float));
if(x==y){
omp_set_num_threads(nn_threads);
int i = 0;
int m = MatrixA->shape[0];
int n = MatrixA->shape[1];
float * matrixA, *matrixB,*res_matrix;
matrixA = MatrixA->matrix;
matrixB = MatrixB->matrix;
res_matrix = result->matrix;
#pragma omp parallel for num_threads(nn_threads) shared(matrixA,matrixB,res_matrix,m,n) private(i) schedule(static)
for(i=0;i<m*n;i++)
res_matrix[i] = matrixA[i] * matrixB[i];
}
else{
omp_set_num_threads(nn_threads);
int i = 0;
int m = out_m;
int n = out_n;
float * matrixA, *matrixB,*res_matrix,*temp_matrix;
matrixA = MatrixA->matrix;
matrixB = MatrixB->matrix;
temp_matrix = temp->matrix;
res_matrix = result->matrix;
#pragma omp parallel for num_threads(nn_threads) shared(matrixA,matrixB,res_matrix,temp_matrix,m,n,x,y) private(i) schedule(static)
for(i=0;i<m*n;i++)
res_matrix[i] = x>y ? matrixA[i] * temp_matrix[i] : temp_matrix[i] * matrixB[i];
}
if(temp!=NULL)
free2d(temp);
temp = NULL;
result->shape[0] = out_m;
result->shape[1] = out_n;
return result;
}
/**!
* Function performs element-wise division on two matrices.
* @param MatrixA First Matrix
* @param MatrixB Second Matrix
* @result Returns a pointer to the result of divison(MatrixA,MatrixB)
* @return A pointer to the result of divison(MatrixA,MatrixB)
*/
dARRAY * divison(dARRAY * restrict MatrixA, dARRAY * restrict MatrixB){
if(MatrixA == NULL){
printf("\033[1;31mError:\033[93m MatrixA is empty. Call divison() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
if(MatrixB == NULL){
printf("\033[1;31mError:\033[93m MatrixB is empty. Call divison() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * temp = NULL;
int x = size(MatrixA);
int y = size(MatrixB);
int flag=0;
if(x>y){
temp = b_cast(MatrixA,MatrixB);
flag=1;
}
else if(x<y){
temp = b_cast(MatrixB,MatrixA);
flag=1;
}
if(temp==NULL && flag){
printf("\033[1;31mError:\033[93m Could not perform divison(). Please check shape of input matrices.\033[0m\n");
return NULL;
}
//the result takes the shape of the larger (broadcast-target) operand
int out_m = (x<y) ? MatrixB->shape[0] : MatrixA->shape[0];
int out_n = (x<y) ? MatrixB->shape[1] : MatrixA->shape[1];
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(out_m*out_n,sizeof(float));
if(x==y){
omp_set_num_threads(nn_threads);
int i = 0;
int m = MatrixA->shape[0];
int n = MatrixA->shape[1];
float * matrixA, *matrixB,*res_matrix;
matrixA = MatrixA->matrix;
matrixB = MatrixB->matrix;
res_matrix = result->matrix;
#pragma omp parallel for num_threads(nn_threads) shared(matrixA,matrixB,res_matrix,m,n) private(i) schedule(static)
for(i=0;i<m*n;i++){
res_matrix[i] = matrixA[i] / matrixB[i];
}
}
else{
omp_set_num_threads(nn_threads);
int i = 0;
int m = out_m;
int n = out_n;
float * matrixA, *matrixB,*res_matrix,*temp_matrix;
matrixA = MatrixA->matrix;
matrixB = MatrixB->matrix;
temp_matrix = temp->matrix;
res_matrix = result->matrix;
#pragma omp parallel for num_threads(nn_threads) shared(matrixA,matrixB,res_matrix,temp_matrix,m,n,x,y) private(i) schedule(static)
for(i=0;i<m*n;i++)
res_matrix[i] = x>y ? matrixA[i] / temp_matrix[i] : temp_matrix[i] / matrixB[i];
}
if(temp!=NULL)
free2d(temp);
temp = NULL;
result->shape[0] = out_m;
result->shape[1] = out_n;
return result;
}
/**!
* Function performs element-wise addition on two matrices.
* @param MatrixA First Matrix
* @param MatrixB Second Matrix
* @result Returns a pointer to the result of add(MatrixA,MatrixB)
* @return A pointer to the result of add(MatrixA,MatrixB)
*/
dARRAY * add(dARRAY * MatrixA, dARRAY * MatrixB){
if(MatrixA == NULL){
printf("\033[1;31mError:\033[93m MatrixA is empty. Call add() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
if(MatrixB == NULL){
printf("\033[1;31mError:\033[93m MatrixB is empty. Call add() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * bcast_arr = NULL;
int x = size(MatrixA);
int y = size(MatrixB);
int flag=0;
if(x>y){
bcast_arr = b_cast(MatrixA,MatrixB);
flag=1;
}
else if(x<y){
bcast_arr = b_cast(MatrixB,MatrixA);
flag=1;
}
if(bcast_arr==NULL && flag){
printf("\033[1;31mError:\033[93m Could not perform add(). Please check shape of input matrices.\033[0m\n");
return NULL;
}
//the result takes the shape of the larger (broadcast-target) operand
int out_m = (x<y) ? MatrixB->shape[0] : MatrixA->shape[0];
int out_n = (x<y) ? MatrixB->shape[1] : MatrixA->shape[1];
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(out_m*out_n,sizeof(float));
if(x==y){
cblas_scopy(MatrixB->shape[0]*MatrixB->shape[1],MatrixB->matrix,1,result->matrix,1);
cblas_saxpy(MatrixA->shape[0]*MatrixA->shape[1],1,MatrixA->matrix,1,result->matrix,1);
}
else{
if(x>y){
cblas_scopy(MatrixA->shape[0]*MatrixA->shape[1],bcast_arr->matrix,1,result->matrix,1);
cblas_saxpy(MatrixA->shape[0]*MatrixA->shape[1],1,MatrixA->matrix,1,result->matrix,1);
}
else{
cblas_scopy(MatrixB->shape[0]*MatrixB->shape[1],MatrixB->matrix,1,result->matrix,1);
//bcast_arr has been broadcast to MatrixB's (larger) shape, so use MatrixB's size
cblas_saxpy(MatrixB->shape[0]*MatrixB->shape[1],1,bcast_arr->matrix,1,result->matrix,1);
}
}
if(bcast_arr!=NULL)
free2d(bcast_arr);
result->shape[0] = out_m;
result->shape[1] = out_n;
return result;
}
/**!
* Function performs element-wise subtraction on two matrices.
* @param MatrixA First Matrix
* @param MatrixB Second Matrix
* @result Returns a pointer to the result of subtract(MatrixA,MatrixB)
* @return A pointer to the result of subtract(MatrixA,MatrixB)
*/
dARRAY * subtract(dARRAY * MatrixA, dARRAY * MatrixB){
if(MatrixA == NULL){
printf("\033[1;31mError:\033[93m MatrixA is empty. Call subtract() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
if(MatrixB == NULL){
printf("\033[1;31mError:\033[93m MatrixB is empty. Call subtract() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * bcast_arr = NULL;
int x = size(MatrixA);
int y = size(MatrixB);
int flag=0;
if(x>y){
bcast_arr = b_cast(MatrixA,MatrixB);
flag=1;
}
else if(x<y){
bcast_arr = b_cast(MatrixB,MatrixA);
flag=1;
}
if(bcast_arr==NULL && flag==1){
printf("\033[1;31mError:\033[93m Could not perform subtract(). Please check shape of input matrices.\033[0m\n");
return NULL;
}
//the result takes the shape of the larger (broadcast-target) operand
int out_m = (x<y) ? MatrixB->shape[0] : MatrixA->shape[0];
int out_n = (x<y) ? MatrixB->shape[1] : MatrixA->shape[1];
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(out_m*out_n,sizeof(float));
if(x==y){
cblas_scopy(MatrixA->shape[0]*MatrixA->shape[1],MatrixA->matrix,1,result->matrix,1);
cblas_saxpy(MatrixB->shape[0]*MatrixB->shape[1],-1,MatrixB->matrix,1,result->matrix,1);
}
else{
if(x>y){
cblas_scopy(MatrixA->shape[0]*MatrixA->shape[1],MatrixA->matrix,1,result->matrix,1);
cblas_saxpy(MatrixA->shape[0]*MatrixA->shape[1],-1,bcast_arr->matrix,1,result->matrix,1);
}
else{
cblas_scopy(bcast_arr->shape[0]*bcast_arr->shape[1],bcast_arr->matrix,1,result->matrix,1);
//MatrixB is the larger operand here, so subtract over its full size
cblas_saxpy(MatrixB->shape[0]*MatrixB->shape[1],-1,MatrixB->matrix,1,result->matrix,1);
}
}
if(bcast_arr!=NULL)
free2d(bcast_arr);
bcast_arr = NULL;
result->shape[0] = out_m;
result->shape[1] = out_n;
return result;
}
/**!
* Function adds a scalar value to each element of a matrix.
* @param matrix A matrix of dARRAY Object.
* @param scalar A scalar value that needs to be added to each element of matrix.
* @result A pointer to the result of addScalar(matrix,scalar)
* @return A pointer to the result of addScalar(matrix,scalar)
*/
dARRAY * addScalar(dARRAY * matrix, float scalar){
if(matrix == NULL){
printf("\033[1;31mError:\033[93m matrix is empty. Call addScalar() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix,result,scalar) schedule(static)
for(int i=0; i<matrix->shape[0]*matrix->shape[1]; i++){
result->matrix[i] = matrix->matrix[i] + scalar;
}
result->shape[0] = matrix->shape[0];
result->shape[1] = matrix->shape[1];
return result;
}
/**!
* Function subtracts a scalar value from each element of a matrix.
* @param matrix A matrix of dARRAY Object.
* @param scalar A scalar value that needs to be subtracted from each element of matrix.
* @result A pointer to the result of subScalar(matrix,scalar)
* @return A pointer to the result of subScalar(matrix,scalar)
*/
dARRAY * subScalar(dARRAY * matrix, float scalar){
if(matrix == NULL){
printf("\033[1;31mError:\033[93m matrix is empty. Call subScalar() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix,result,scalar) schedule(static)
for(int i=0; i<matrix->shape[0]*matrix->shape[1]; i++){
result->matrix[i] = matrix->matrix[i] - scalar;
}
result->shape[0] = matrix->shape[0];
result->shape[1] = matrix->shape[1];
return result;
}
/**!
* Function multiplies a scalar value with each element of a matrix.
* @param matrix A matrix of dARRAY Object
* @param scalar A scalar value that needs to be multiplied with each element of matrix.
* @result A pointer to the result of mulScalar(matrix,scalar)
* @return A pointer to the result of mulScalar(matrix,scalar)
*/
dARRAY * mulScalar(dARRAY * matrix, float scalar){
if(matrix == NULL){
printf("\033[1;31mError:\033[93m matrix is empty. Call mulScalar() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix,result,scalar) schedule(static)
for(int i=0; i<matrix->shape[0]*matrix->shape[1]; i++){
result->matrix[i] = matrix->matrix[i] * scalar;
}
result->shape[0] = matrix->shape[0];
result->shape[1] = matrix->shape[1];
return result;
}
dARRAY * mulScalarm(dARRAY * matrix, float scalar){
if(matrix==NULL){
printf("\033[1;31mError:\033[93m Matrix is empty. Call mulScalarm() only after initializing dARRAY object.\033[0m\n");
return NULL;
}
float * scaled_mat = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
//Note: cblas_sscal scales the input matrix in place; the scaled values are then copied out.
//The copy depends on the scale, so the two calls must run in order.
cblas_sscal(matrix->shape[0]*matrix->shape[1],scalar,matrix->matrix,1);
cblas_scopy(matrix->shape[0]*matrix->shape[1],matrix->matrix,1,scaled_mat,1);
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = scaled_mat;
result->shape[0] = matrix->shape[0];
result->shape[1] = matrix->shape[1];
return result;
}
/**!
* Function divides a scalar value with each element of a matrix.
* @param matrix A matrix of dARRAY Object.
* @param scalar A scalar value that needs to be divided with each element of matrix.
* @result A pointer to the result of divScalar(matrix,scalar)
* @return A pointer to the result of divScalar(matrix,scalar)
*/
dARRAY * divScalar(dARRAY * matrix, float scalar){
if(matrix == NULL){
printf("\033[1;31mError:\033[93m matrix is empty. Call divScalar() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix,result,scalar) schedule(static)
for(int i=0; i<matrix->shape[0]*matrix->shape[1]; i++){
result->matrix[i] = matrix->matrix[i] / scalar;
}
result->shape[0] = matrix->shape[0];
result->shape[1] = matrix->shape[1];
return result;
}
dARRAY * divScalarm(dARRAY * matrix, float scalar){
if(matrix==NULL){
printf("\033[1;31mError:\033[93m Matrix is empty. Call divScalarm() only after initializing dARRAY object.\033[0m\n");
return NULL;
}
float * div_mat = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
//Note: cblas_sscal scales the input matrix in place by 1/scalar; the result is then copied out.
//The copy depends on the scale, so the two calls must run in order.
cblas_sscal(matrix->shape[0]*matrix->shape[1],(1/scalar),matrix->matrix,1);
cblas_scopy(matrix->shape[0]*matrix->shape[1],matrix->matrix,1,div_mat,1);
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = div_mat;
result->shape[0] = matrix->shape[0];
result->shape[1] = matrix->shape[1];
return result;
}
/**!
* Function raises the elements of a matrix to the specified power.
* @param matrix A matrix of dARRAY Object
* @param power A value to which each element in matrix must be raised.
* @result A pointer to the result of power(matrix,power)
* @return A pointer to the result of power(matrix,power)
*/
dARRAY * power(dARRAY * matrix, float power){
if(matrix == NULL){
printf("\033[1;31mError:\033[93m matrix is empty. Call power() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix,result,power) schedule(static)
for(int i=0; i<matrix->shape[0]*matrix->shape[1]; i++){
result->matrix[i] = (float)pow(matrix->matrix[i],power);
}
result->shape[0] = matrix->shape[0];
result->shape[1] = matrix->shape[1];
return result;
}
/**!
* Function finds the sqrt() of the elements of a matrix.
* @param matrix A matrix of dARRAY Object
* @result A pointer to the result of squareroot(matrix)
* @return A pointer to the result of squareroot(matrix)
*/
dARRAY * squareroot(dARRAY * matrix){
if(matrix == NULL){
printf("\033[1;31mError:\033[93m matrix is empty. Call squareroot() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix,result) schedule(static)
for(int i=0; i<matrix->shape[0]*matrix->shape[1]; i++){
result->matrix[i] = (float)sqrt(matrix->matrix[i]);
}
result->shape[0] = matrix->shape[0];
result->shape[1] = matrix->shape[1];
return result;
}
/**!
 * Function finds the exp() of the elements of a matrix.
 * @param matrix A matrix of dARRAY Object
 * @result A pointer to the result of exponentional(matrix)
 * @return A pointer to the result of exponentional(matrix)
 */
dARRAY * exponentional(dARRAY * matrix){
if(matrix == NULL){
printf("\033[1;31mError:\033[93m matrix is empty. Call exponentional() only after initializing dARRAY object\033[0m\n");
exit(EXIT_FAILURE);
}
dARRAY * result = (dARRAY*)malloc(sizeof(dARRAY));
result->matrix = (float*)calloc(matrix->shape[0]*matrix->shape[1],sizeof(float));
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix,result) schedule(static)
for(int i=0; i<matrix->shape[0]*matrix->shape[1]; i++){
result->matrix[i] = (float)exp(matrix->matrix[i]);
}
result->shape[0] = matrix->shape[0];
result->shape[1] = matrix->shape[1];
return result;
}
/**!
* Function performs broadcasting of matrices
* Refer to www.numpy.org for detailed explanation of broadcasting.
* The implementation used here is similar to the one in www.numpy.org.
* @param MatrixA Matrix of dARRAY Object
* @param MatrixB Matrix of dARRAY Object
* @result A pointer to the broadcasted matrix
* @return A pointer to the broadcasted matrix
*/
dARRAY * b_cast(dARRAY * MatrixA, dARRAY * MatrixB){
dARRAY * b_castArr = NULL;
if(MatrixA->shape[1]==MatrixB->shape[1] && MatrixB->shape[0]==1 && MatrixA->shape[0]>MatrixB->shape[0]){
//B matrix has the shape of (1,n)
//we need to copy B m times
//M(5,4) B(1,4) repeat 5 * 4 = 20 times
b_castArr = (dARRAY*)malloc(sizeof(dARRAY));
b_castArr->matrix = (float*)calloc(MatrixA->shape[0]*MatrixA->shape[1],sizeof(float));
float * bcast_matrix, *matrixB;
bcast_matrix = b_castArr->matrix;
matrixB = MatrixB->matrix;
int m = MatrixA->shape[0];
int n = MatrixB->shape[1];
int i = 0;
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrixB,bcast_matrix,m,n) private(i) schedule(static,8)
for(i=0;i<m*n;i++){
bcast_matrix[i] = matrixB[(i%n)];
}
b_castArr->shape[0] = MatrixA->shape[0];
b_castArr->shape[1] = MatrixB->shape[1];
}
else if(MatrixA->shape[0]==MatrixB->shape[0] && MatrixB->shape[1]==1 && MatrixA->shape[1]>MatrixB->shape[1]){
//B is of the form (m,1)
//A is of (m,n)
//copy column wise.
b_castArr = (dARRAY*)malloc(sizeof(dARRAY));
b_castArr->matrix = (float*)calloc(MatrixA->shape[0]*MatrixA->shape[1],sizeof(float));
float * bcast_matrix, *matrixB;
bcast_matrix = b_castArr->matrix;
matrixB = MatrixB->matrix;
int m = MatrixA->shape[0];
int n = MatrixA->shape[1];
int i = 0;
int j = 0;
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrixB,bcast_matrix,m,n) private(i,j) schedule(static,8)
for(i=0;i<m;i++){
//copy element i of B across the whole of row i
//indexing with i*n+j keeps every iteration independent, so the loop is safe to parallelize
for(j=0;j<n;j++){
bcast_matrix[i*n+j] = matrixB[i];
}
}
b_castArr->shape[0] = MatrixA->shape[0];
b_castArr->shape[1] = MatrixA->shape[1];
}
return b_castArr;
}
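/*
 * Illustrative sketch (hypothetical shapes): b_cast() handles exactly the
 * two cases above, mirroring NumPy-style broadcasting for 2D operands.
 *
 * A(3,4) op B(1,4) -> B is tiled down the rows, giving b_castArr(3,4)
 * A(3,4) op B(3,1) -> B is tiled across the columns, giving b_castArr(3,4)
 *
 * Any other shape mismatch returns NULL, which callers such as add() and
 * multiply() report as a shape error.
 */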
/**!
* Function finds the sum of elements of matrix.
* @param matrix A matrix of dARRAY Object
* @param axis If axis == 1, then sums all elements in a row. If axis == 0, then sums all the elements in a column.
* @result A pointer to the result of sum(matrix,axis)
* @return A pointer to the result of sum(matrix,axis)
*/
dARRAY * sum(dARRAY * matrix, int axis){
if(axis!=0 && axis!=1){
printf("\033[1;31mError:\033[93m axis=%d not supported. Instead use axis=0 or axis=1\033[0m\n",axis);
return NULL;
}
// if(matrix->shape[0]==1 || matrix->shape[1]==1) return matrix;
dARRAY * new = (dARRAY*)malloc(sizeof(dARRAY));
new->matrix = NULL;
if(axis==0){
new->matrix = (float*)calloc(matrix->shape[1],sizeof(float));
dARRAY * temp = transpose(matrix);
int i = 0;
int j = 0;
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(temp,new) private(i,j)
for(i=0;i<temp->shape[0];i++){
//per-row accumulator: private to each iteration rather than a reduction across rows
float sum_ = 0.0f;
for(j=0;j<temp->shape[1];j++){
sum_ += temp->matrix[i*temp->shape[1]+j];
}
new->matrix[i] = sum_;
}
new->shape[0] = 1;
new->shape[1] = matrix->shape[1];
free2d(temp);
temp=NULL;
}
else if(axis==1){
new->matrix = (float*)calloc(matrix->shape[0],sizeof(float));
omp_set_num_threads(nn_threads);
int j = 0, i = 0;
#pragma omp parallel for num_threads(nn_threads) shared(matrix,new) private(i,j)
for(i=0;i<matrix->shape[0];i++){
//per-row accumulator, private to each iteration
float row_sum = 0.0f;
for(j=0;j<matrix->shape[1];j++){
row_sum += matrix->matrix[i*matrix->shape[1]+j];
}
new->matrix[i] = row_sum;
}
new->shape[0] = matrix->shape[0];
new->shape[1] = 1;
}
return new;
}
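/*
 * Illustrative example (hypothetical values): for M = [[1,2,3],[4,5,6]],
 * sum(M,0) yields the column sums [[5,7,9]] with shape (1,3), while
 * sum(M,1) yields the row sums [[6],[15]] with shape (2,1).
 */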
/**!
 * Function finds the Frobenius norm of a matrix.
 * @param matrix A matrix of dARRAY Object
 * @result The Frobenius norm of the matrix
 * @return The Frobenius norm of the matrix
 */
float frobenius_norm(dARRAY * matrix){
float sum_sq = 0.0f;
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix) reduction(+:sum_sq) schedule(static)
for(int i=0;i<matrix->shape[0]*matrix->shape[1];i++){
sum_sq += matrix->matrix[i]*matrix->matrix[i];
}
//the norm is the square root of the sum of squared elements
return (float)sqrt(sum_sq);
}
/**!
* Function finds the Manhattan_distance of matrix.
* @param matrix A matrix of dARRAY Object
* @result Result of Manhattan_distance(matrix)
* @return Result of Manhattan_distance(matrix)
*/
float Manhattan_distance(dARRAY * matrix){
float dist = 0.0f;
omp_set_num_threads(nn_threads);
#pragma omp parallel for num_threads(nn_threads) shared(matrix) reduction(+:dist) schedule(static)
for(int i=0;i<matrix->shape[0]*matrix->shape[1];i++){
dist += fabsf(matrix->matrix[i]); //fabsf, not abs: abs() truncates floats to int
}
return dist;
}
/**!
* Function generates a matrix of specified dimensions filled with random variables
* from normal distribution with mean 0 and unit standard deviation.
* @param dims An array of matrix dimensions [rows,columns]
* @result A pointer to the generated matrix.
* @return A pointer to the generated matrix.
*/
dARRAY * randn(int * dims){
dARRAY * matrix = (dARRAY*)malloc(sizeof(dARRAY));
matrix->matrix = (float*)malloc(sizeof(float)*dims[0]*dims[1]);
//rand_norm() relies on shared state in gaussGenerator(), which is not
//thread-safe, so this fill loop is kept serial
for(int i=0;i<dims[0];i++){
for(int j=0;j<dims[1];j++){
matrix->matrix[i*dims[1]+j] = rand_norm(0.0,1.0);
}
}
matrix->shape[0] = dims[0];
matrix->shape[1] = dims[1];
return matrix;
}
/**!
* Function creates an array that contains shuffled indices
* @param length Number of elements in the array to be shuffled
* @result Pointer to the array containing shuffled indices
* @return Pointer to the array containing shuffled indices
*/
int * permutation(int length){
int * permute_arr = (int*)malloc(sizeof(int)*length);
#pragma omp parallel for num_threads(nn_threads) shared(permute_arr)
for(int i=0;i<length;i++){
permute_arr[i] = i;
}
srand(time(NULL));
//Fisher-Yates shuffle: each swap depends on the previous state, so the loop is inherently sequential
for(int i = length-1;i>0;i--){
int j = rand()%(i+1);
int temp = permute_arr[i];
permute_arr[i] = permute_arr[j];
permute_arr[j] = temp;
}
return permute_arr;
}
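/*
 * Minimal usage sketch (process_example() and num_examples are hypothetical):
 * the shuffled indices can drive an epoch-wise reordering of training data.
 *
 * int * order = permutation(num_examples);
 * for(int i=0;i<num_examples;i++){
 *   process_example(order[i]);
 * }
 * free(order);
 */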
/**!
* Function reshapes a given matrix to specified dimensions
* @param matrix Matrix to be reshaped
* @param dims An array of matrix dimension [rows,columns]
* @result Pointer to the reshaped matrix
* @return Pointer to the reshaped matrix
*/
dARRAY * reshape(dARRAY * matrix, int * dims){
if(matrix==NULL){
printf("\033[1;31mError:\033[93m Matrix is empty. Call reshape() only after intializing dARRAY object.\033[0m\n");
return NULL;
}
if(size(matrix)!=(dims[0]*dims[1])){
printf("\033[1;31mError:\033[93m Shape Error. Matrix could not be reshaped to the specified dims.\033[0m\n");
return matrix;
}
matrix->shape[0] = dims[0];
matrix->shape[1] = dims[1];
return matrix;
}
/**!
* Function finds the mean of a matrix.
* @param matrix A matrix of dARRAY Object
* @result Mean of a matrix
* @return Mean of a matrix
*/
float mean(dARRAY * matrix){
if(matrix==NULL){
printf("\033[1;31mError:\033[93m Cannot find mean of empty matrix. Call mean() only after intializing dARRAY object.\033[0m\n");
return (float)0;
}
float sum = 0;
for(int i=0; i<matrix->shape[0]*matrix->shape[1];i++)
sum += matrix->matrix[i];
return sum/(matrix->shape[0]*matrix->shape[1]);
}
/**!
* Function finds the variance of a matrix.
* @param matrix A matrix of dARRAY Object
* @param type if type=='sample' then function finds the sample variance else it finds the population variance.
* @result Variance of the matrix
* @return Variance of the matrix
*/
float var(dARRAY * matrix, char * type){
if(matrix==NULL){
printf("\033[1;31mError:\033[93m Cannot find variance of empty matrix. Call var() only after intializing dARRAY object.\033[0m\n");
return (float)0;
}
float errorSum = 0;
float xbar = mean(matrix);
for(int i=0;i<matrix->shape[0]*matrix->shape[1];i++){
errorSum += pow((matrix->matrix[i]-xbar),2);
}
if(!strcmp(type,(const char *)"sample"))
return errorSum/(matrix->shape[0]*matrix->shape[1]-1);
else if(!strcmp(type,(const char *)"population"))
return errorSum/(matrix->shape[0]*matrix->shape[1]);
else{
printf("\033[1;31mError:\033[93m \"type\" parameter can only take values \"sample\" or \"population\".\033[0m\n");
return (float)0;
}
}
/**!
* Function finds the standard deviation of matrix.
* @param matrix A matrix of dARRAY Object
* @param type if type=='sample' then function finds the sample std else it finds the population std.
* @result Standard deviation of matrix
* @return Standard deviation of matrix
*/
float std(dARRAY * matrix, char * type){
return pow(var(matrix,type), 0.5);
}
/**!
* Helper function of gaussRandom()
* Function generates a random variable with normal distribution.
* @param cache A pointer to the cache value
* @param return_cache A pointer to check if cache has a value.
* @result A random variable of normal distribution.
* @return A random variable of normal distribution.
*/
float gaussGenerator(float * cache, int * return_cache){
if(*return_cache){
*return_cache = 0;
return *cache;
}
//use drand48 to generate random values from uniform distribution
float u = 2.0 * drand48() - 1.0;
float v = 2.0 * drand48() - 1.0;
float r = u*u + v*v;
if(r==0.0 || r>1) return gaussGenerator(cache,return_cache);
float c = sqrt(-2*log(r)/r);
*cache = c*v; //store this in cache
*return_cache = 1;
return u*c;
}
/**!
* Function generates a random variable with normal distribution.
* @result A random variable of normal distribution.
* @return A random variable of normal distribution.
*/
float gaussRandom(){
cache=0.0;
return_cache = 0;
return gaussGenerator(&cache,&return_cache);
}
/**!
* Function generates a random variable with normal distribution with specified mean and standard deviation.
* @param mu Mean
* @param std Standard Deviation
* @result A random variable of normal distribution [X ~ N(mu,std*std)].
* @return A random variable of normal distribution [X ~ N(mu,std*std)].
*/
float rand_norm(float mu, float std){
return mu+gaussRandom()*std;
}
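/*
 * Background note: gaussGenerator() above implements the Marsaglia polar
 * method -- it draws (u,v) uniformly on [-1,1]^2, rejects points outside
 * the unit disk, and converts an accepted pair into two independent
 * N(0,1) samples, caching the second for the next call. A minimal usage
 * sketch (the srand48() seeding call is an assumption; drand48() should
 * be seeded once before use):
 *
 * srand48(time(NULL));
 * float s = rand_norm(5.0f, 2.0f); // one sample from N(5, 4)
 */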
/**!
* Function deallocates a 2D Matrix.
* @param matrix Matrix that needs to be freed.
* @result void
* @return void
*/
void free2d(dARRAY * matrix){
if(matrix==NULL) {
printf("\033[1;93mWarning:\033[93m Matrix is Empty. No need for deallocation.\033[0m\n");
return;
}
free(matrix->matrix);
free(matrix);
// matrix = NULL;
return;
}
/**!
* Function returns the size of the matrix
* @param A Matrix of type dARRAY Object
* @result Total size of the matrix
* @return Total size of the matrix
*/
int size(dARRAY * A){
if(A==NULL){
printf("\033[1;31mError:\033[93m Matrix is Empty. Call size() only after intializing dARRAY object.\033[0m\n");
return 0;
}
return A->shape[0]*A->shape[1];
}
/**!
* Function displays the shape of the matrix
* @param A Matrix of type dARRAY Object
* @result Prints the shape of input matrix
* @return void
*/
void shape(dARRAY * A){
if(A==NULL){
printf("\033[1;31mError:\033[93m Matrix is Empty. Call shape() only after intializing dARRAY object.\033[0m\n");
return;
}
//printf("first element of matrix is : %f\n",A->matrix[0]);
printf("(%d,%d)\n",A->shape[0],A->shape[1]);
}
//Function to create a time delay by busy-waiting. Mimics Thread.sleep() of Java.
//time() has one-second resolution, so delays shorter than a second round down to zero.
void sleep_my(int milliseconds) {
unsigned int duration = time(0) + (milliseconds/1000);
while(time(0)<duration);
}
//This function is used instead of fflush(stdin) as it is a bad practice to use it
//due to undefined behaviour.
void cleanSTDIN() {
int ch;
while ((ch = getchar()) != '\n' && ch != EOF){}
}
/**!
 * Function calculates a safe number of threads to use.
* @return void
*/
void get_safe_nn_threads(){
int num_cpu_cores = sysconf(_SC_NPROCESSORS_CONF);
if(num_cpu_cores<=16){
nn_threads = num_cpu_cores*2;
}
else nn_threads = num_cpu_cores;
} |
NAL.c | /*
* The MIT License
*
* Copyright 2020 The OpenNARS authors.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "NAL.h"
int ruleID = 0;
static void NAL_GeneratePremisesUnifier(int i, Atom atom, int premiseIndex)
{
if(atom)
{
//upper case atoms are treated as variables in the meta rule language
if(Narsese_atomNames[atom-1][0] >= 'A' && Narsese_atomNames[atom-1][0] <= 'Z')
{
//unification fails on an unequal value assignment (the value at position i versus the one previously bound); otherwise the variable gets bound
printf("subtree = Term_ExtractSubterm(&term%d, %d);\n", premiseIndex, i);
printf("if(substitutions[%d].atoms[0]!=0 && !Term_Equal(&substitutions[%d], &subtree)){ goto RULE_%d; }\n", atom, atom, ruleID);
printf("substitutions[%d] = subtree;\n", atom);
}
else
{
//structural constraint given by copulas at position i
printf("if(term%d.atoms[%d] != %d){ goto RULE_%d; }\n", premiseIndex, i, atom, ruleID);
}
}
}
static void NAL_GenerateConclusionSubstitution(int i, Atom atom)
{
if(atom)
{
if(Narsese_atomNames[atom-1][0] >= 'A' && Narsese_atomNames[atom-1][0] <= 'Z')
{
//conclusion term gets variables substituted
printf("if(!Term_OverrideSubterm(&conclusion,%d,&substitutions[%d])){ goto RULE_%d; }\n", i, atom, ruleID);
}
else
{
//conclusion term inherits structure from meta rule, namely the copula
printf("conclusion.atoms[%d] = %d;\n", i, atom);
}
}
}
static void NAL_GenerateConclusionTerm(char *premise1, char *premise2, char* conclusion, bool doublePremise)
{
Term term1 = Narsese_Term(premise1);
Term term2 = doublePremise ? Narsese_Term(premise2) : (Term) {0};
Term conclusion_term = Narsese_Term(conclusion);
printf("RULE_%d:\n{\n", ruleID++);
//skip double/single premise rule if single/double premise
if(doublePremise) { printf("if(!doublePremise) { goto RULE_%d; }\n", ruleID); }
if(!doublePremise) { printf("if(doublePremise) { goto RULE_%d; }\n", ruleID); }
puts("Term substitutions[27+NUM_ELEMENTS(Narsese_RuleTableVars)] = {0}; Term subtree = {0};"); //27 because of 9 indep, 9 dep, 9 query vars
for(int i=0; i<COMPOUND_TERM_SIZE_MAX; i++)
{
NAL_GeneratePremisesUnifier(i, term1.atoms[i], 1);
}
if(doublePremise)
{
for(int i=0; i<COMPOUND_TERM_SIZE_MAX; i++)
{
NAL_GeneratePremisesUnifier(i, term2.atoms[i], 2);
}
}
puts("Term conclusion = {0};");
for(int i=0; i<COMPOUND_TERM_SIZE_MAX; i++)
{
NAL_GenerateConclusionSubstitution(i, conclusion_term.atoms[i]);
}
}
static void NAL_GenerateRule(char *premise1, char *premise2, char* conclusion, char* truthFunction, bool doublePremise, bool switchTruthArgs)
{
NAL_GenerateConclusionTerm(premise1, premise2, conclusion, doublePremise);
if(switchTruthArgs)
{
printf("Truth conclusionTruth = %s(truth2,truth1);\n", truthFunction);
}
else
{
printf("Truth conclusionTruth = %s(truth1,truth2);\n", truthFunction);
}
puts("NAL_DerivedEvent(RuleTable_Reduce(conclusion, false), conclusionOccurrence, conclusionTruth, conclusionStamp, currentTime, parentPriority, conceptPriority, 0, validation_concept, validation_cid);}\n");
}
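/*
 * Illustrative sketch of the emitted code (labels, atom indices and the
 * truth function depend on the actual rule table): each rule becomes a
 * labelled block that unifies the premises, substitutes into the
 * conclusion, applies its truth function and hands the result to
 * NAL_DerivedEvent, roughly:
 *
 * RULE_7:
 * {
 * if(!doublePremise) { goto RULE_8; }
 * Term substitutions[27+NUM_ELEMENTS(Narsese_RuleTableVars)] = {0}; Term subtree = {0};
 * if(term1.atoms[0] != 2){ goto RULE_8; }
 * ...
 * Truth conclusionTruth = Truth_Deduction(truth1,truth2);
 * NAL_DerivedEvent(RuleTable_Reduce(conclusion, false), ...);
 * }
 */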
static void NAL_GenerateReduction(char *premise1, char* conclusion)
{
NAL_GenerateConclusionTerm(premise1, NULL, conclusion, false);
puts("IN_DEBUG( fputs(\"Reduced: \", stdout); Narsese_PrintTerm(&term1); fputs(\" -> \", stdout); Narsese_PrintTerm(&conclusion); puts(\"\"); ) \nreturn conclusion;\n}");
}
void NAL_GenerateRuleTable()
{
puts("#include \"RuleTable.h\"");
puts("void RuleTable_Apply(Term term1, Term term2, Truth truth1, Truth truth2, long conclusionOccurrence, Stamp conclusionStamp, long currentTime, double parentPriority, double conceptPriority, bool doublePremise, Concept *validation_concept, long validation_cid)\n{\ngoto RULE_0;");
#define H_NAL_RULES
#include "NAL.h"
#undef H_NAL_RULES
printf("RULE_%d:;\n}\n", ruleID);
printf("Term RuleTable_Reduce(Term term1, bool doublePremise)\n{\ngoto RULE_%d;\n", ruleID);
#define H_NAL_REDUCTIONS
#include "NAL.h"
#undef H_NAL_REDUCTIONS
printf("RULE_%d:;\nreturn term1;\n}\n\n", ruleID);
}
void NAL_DerivedEvent(Term conclusionTerm, long conclusionOccurrence, Truth conclusionTruth, Stamp stamp, long currentTime, double parentPriority, double conceptPriority, long occurrenceTimeOffset, Concept *validation_concept, long validation_cid)
{
Event e = { .term = conclusionTerm,
.type = EVENT_TYPE_BELIEF,
.truth = conclusionTruth,
.stamp = stamp,
.occurrenceTime = conclusionOccurrence ,
.creationTime = currentTime };
#pragma omp critical(Memory)
{
if(validation_concept == NULL || validation_concept->id == validation_cid) //concept recycling would invalidate the derivation (allows to lock only adding results to memory)
{
Memory_AddEvent(&e, currentTime, conceptPriority*parentPriority*Truth_Expectation(conclusionTruth), occurrenceTimeOffset, false, true, false, false, false);
}
}
}
|
computeGraph.c | #include "defs.h"
double computeGraph(graph* G, graphSDG* SDGdata)
{
//mcsim_skip_instrs_begin();
VERT_T* endV;
LONG_T *degree, *numEdges, *pos, *pSums;
WEIGHT_T* w;
double elapsed_time;
#ifdef _OPENMP
omp_lock_t *vLock;
LONG_T chunkSize;
#endif
elapsed_time = get_seconds();
#ifdef _OPENMP
omp_set_num_threads(NUM_THREADS);
#endif
#ifdef _OPENMP
#pragma omp parallel
{
#endif
LONG_T i, j, u, n, m, tid, nthreads;
#ifdef DIAGNOSTIC
double elapsed_time_part;
#endif
#ifdef _OPENMP
nthreads = omp_get_num_threads();
tid = omp_get_thread_num();
#else
tid = 0;
nthreads = 1;
#endif
n = N;
m = M;
if (tid == 0) {
#ifdef _OPENMP
vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t));
assert(vLock != NULL);
chunkSize = n/nthreads;
#endif
pos = (LONG_T *) malloc(m*sizeof(LONG_T));
assert(pos != NULL);
degree = (LONG_T *) calloc(n, sizeof(LONG_T));
assert(degree != NULL);
}
#ifdef DIAGNOSTIC
if (tid == 0) {
elapsed_time_part = get_seconds();
}
#endif
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for schedule(static, chunkSize)
for (i=0; i<n; i++) {
omp_init_lock(&vLock[i]);
}
#pragma omp barrier
#ifdef DIAGNOSTIC
if (tid == 0) {
elapsed_time_part = get_seconds() - elapsed_time_part;
fprintf(stderr, "Lock initialization time: %lf seconds\n",
elapsed_time_part);
elapsed_time_part = get_seconds();
}
#endif
#pragma omp for
#endif
for (i=0; i<m; i++) {
u = SDGdata->startVertex[i];
#ifdef _OPENMP
omp_set_lock(&vLock[u]);
#endif
pos[i] = degree[u]++;
#ifdef _OPENMP
omp_unset_lock(&vLock[u]);
#endif
}
#ifdef DIAGNOSTIC
if (tid == 0) {
elapsed_time_part = get_seconds() - elapsed_time_part;
fprintf(stderr, "Degree computation time: %lf seconds\n",
elapsed_time_part);
elapsed_time_part = get_seconds();
}
#endif
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for schedule(static, chunkSize)
for (i=0; i<n; i++) {
omp_destroy_lock(&vLock[i]);
}
if (tid == 0)
free(vLock);
#endif
#ifdef DIAGNOSTIC
if (tid == 0) {
elapsed_time_part = get_seconds() - elapsed_time_part;
fprintf(stderr, "Lock destruction time: %lf seconds\n",
elapsed_time_part);
elapsed_time_part = get_seconds();
}
#endif
if (tid == 0) {
numEdges = (LONG_T *) malloc((n+1)*sizeof(LONG_T));
pSums = (LONG_T *) malloc(nthreads*sizeof(LONG_T));
}
#ifdef _OPENMP
#pragma omp barrier
#endif
prefix_sums(degree, numEdges, pSums, n);
#ifdef DIAGNOSTIC
if (tid == 0) {
elapsed_time_part = get_seconds() - elapsed_time_part;
fprintf(stderr, "Prefix sums time: %lf seconds\n",
elapsed_time_part);
elapsed_time_part = get_seconds();
}
#endif
#ifdef _OPENMP
#pragma omp barrier
#endif
if (tid == 0) {
free(degree);
free(pSums);
w = (WEIGHT_T *) malloc(m*sizeof(WEIGHT_T));
endV = (VERT_T *) malloc(m* sizeof(VERT_T));
}
//mcsim_skip_instrs_end();
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
for (i=0; i<m; i++) {
mcsim_tx_begin();
u = SDGdata->startVertex[i];
j = numEdges[u] + pos[i];
endV[j] = SDGdata->endVertex[i];
w[j] = SDGdata->weight[i];
mcsim_mem_fence();
mcsim_tx_end();
mcsim_mem_fence();
}
#ifdef DIAGNOSTIC
if (tid == 0) {
elapsed_time_part = get_seconds() - elapsed_time_part;
fprintf(stderr, "Edge data structure construction time: %lf seconds\n",
elapsed_time_part);
elapsed_time_part = get_seconds();
}
#endif
if (tid == 0) {
free(pos);
mcsim_tx_begin();
G->n = n;
G->m = m;
G->numEdges = numEdges;
G->endV = endV;
G->weight = w;
mcsim_mem_fence();
mcsim_tx_end();
mcsim_mem_fence();
}
#ifdef _OPENMP
}
#endif
//mcsim_skip_instrs_begin();
/* Verification */
#if 0
fprintf(stderr, "SDG data:\n");
for (int i=0; i<SDGdata->m; i++) {
fprintf(stderr, "[%ld %ld %ld] ", SDGdata->startVertex[i],
SDGdata->endVertex[i], SDGdata->weight[i]);
}
fprintf(stderr, "\n");
for (int i=0; i<G->n + 1; i++) {
fprintf(stderr, "[%ld] ", G->numEdges[i]);
}
fprintf(stderr, "\nGraph:\n");
for (int i=0; i<G->n; i++) {
for (int j=G->numEdges[i]; j<G->numEdges[i+1]; j++) {
fprintf(stderr, "[%ld %ld %ld] ", i, G->endV[j], G->weight[j]);
}
}
#endif
free(SDGdata->startVertex);
free(SDGdata->endVertex);
free(SDGdata->weight);
elapsed_time = get_seconds() - elapsed_time;
//mcsim_skip_instrs_end();
return elapsed_time;
}
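/*
 * Illustrative note on the structure built above (hypothetical edges): the
 * graph ends up in CSR form, where numEdges[v] .. numEdges[v+1]-1 indexes
 * the slice of endV/weight holding v's outgoing edges. For the edge list
 * 0->1, 0->2, 1->2 on three vertices this gives
 *   numEdges = {0, 2, 3, 3}
 *   endV     = {1, 2, 2}
 * so vertex 0's neighbours are endV[0..1] and vertex 1's neighbour is endV[2].
 */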
|
gcc-min-max.c | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <omp.h>
#define ITERATIONS 10000
#define ELEMENTS 10240
#define OMP_MIN(x,y) (((x)<(y))?(x):(y))
#define OMP_MAX(x,y) (((x)>(y))?(x):(y))
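// Both macros fully parenthesize their arguments and the whole expansion so
// that uses such as OMP_MIN(a & mask, b) or 2*OMP_MAX(a, b) parse as
// intended; without the parentheses, ?: and low-precedence operators in the
// arguments would regroup the expression. Note the arguments are evaluated
// twice, so they must be side-effect free, as in the CAS loops below.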
static inline void atomic_min_i32(int32_t * target, int32_t value)
{
int32_t desired;
int32_t old = __atomic_load_n(target,__ATOMIC_SEQ_CST);
// early exit when no update required
if (old < value) return;
do {
desired = OMP_MIN(old, value);
} while (!__atomic_compare_exchange_n(target, &old, desired, false /* strong */, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) );
}
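/*
 * All eight atomic_{min,max}_* helpers follow the same compare-and-swap
 * pattern: load the current value, early-exit if no update is needed, then
 * loop on __atomic_compare_exchange until either the swap succeeds or
 * another thread has installed a value that makes the update unnecessary.
 * On failure the builtin refreshes `old` with the current target value,
 * so each retry operates on fresh data.
 */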
static inline void atomic_max_i32(int32_t * target, int32_t value)
{
int32_t desired;
int32_t old = __atomic_load_n(target,__ATOMIC_SEQ_CST);
// early exit when no update required
if (old > value) return;
do {
desired = OMP_MAX(old, value);
} while (!__atomic_compare_exchange_n(target, &old, desired, false /* strong */, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) );
}
static inline void atomic_min_i64(int64_t * target, int64_t value)
{
int64_t desired;
int64_t old = __atomic_load_n(target,__ATOMIC_SEQ_CST);
// early exit when no update required
if (old < value) return;
do {
desired = OMP_MIN(old, value);
} while (!__atomic_compare_exchange_n(target, &old, desired, false /* strong */, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) );
}
static inline void atomic_max_i64(int64_t * target, int64_t value)
{
int64_t desired;
int64_t old = __atomic_load_n(target,__ATOMIC_SEQ_CST);
// early exit when no update required
if (old > value) return;
do {
desired = OMP_MAX(old, value);
} while (!__atomic_compare_exchange_n(target, &old, desired, false /* strong */, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) );
}
static inline void atomic_min_r32(float * target, float value)
{
float desired;
float old;
__atomic_load((int32_t*)target,(int32_t*)&old,__ATOMIC_SEQ_CST);
// early exit when no update required
if (old < value) return;
do {
desired = OMP_MIN(old, value);
} while (!__atomic_compare_exchange((int32_t*)target, (int32_t*)&old, (int32_t*)&desired, false /* strong */, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) );
}
static inline void atomic_max_r32(float * target, float value)
{
float desired;
float old;
__atomic_load((int32_t*)target,(int32_t*)&old,__ATOMIC_SEQ_CST);
// early exit when no update required
if (old > value) return;
do {
desired = OMP_MAX(old, value);
} while (!__atomic_compare_exchange((int32_t*)target, (int32_t*)&old, (int32_t*)&desired, false /* strong */, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) );
}
static inline void atomic_min_r64(double * target, double value)
{
double desired;
double old;
__atomic_load((int64_t*)target,(int64_t*)&old,__ATOMIC_SEQ_CST);
// early exit when no update required
if (old < value) return;
do {
desired = OMP_MIN(old, value);
} while (!__atomic_compare_exchange((int64_t*)target, (int64_t*)&old, (int64_t*)&desired, false /* strong */, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) );
}
static inline void atomic_max_r64(double * target, double value)
{
double desired;
double old;
__atomic_load((int64_t*)target,(int64_t*)&old,__ATOMIC_SEQ_CST);
// early exit when no update required
if (old > value) return;
do {
desired = OMP_MAX(old, value);
} while (!__atomic_compare_exchange((int64_t*)target, (int64_t*)&old, (int64_t*)&desired, false /* strong */, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) );
}
int main(int argc, char * argv[])
{
int32_t i32[ELEMENTS];
int64_t i64[ELEMENTS];
float r32[ELEMENTS];
double r64[ELEMENTS];
int32_t i32_min = 100000;
int64_t i64_min = 100000;
float r32_min = 100000;
double r64_min = 100000;
int32_t i32_max = -100000;
int64_t i64_max = -100000;
float r32_max = -100000;
double r64_max = -100000;
double t0, t1, dt;
for (int j=0; j<ELEMENTS; j++) {
int k = (j+1);
i32[j] = k;
i64[j] = k;
r32[j] = k;
r64[j] = k;
}
#pragma omp parallel
{
int me = omp_get_thread_num();
int nt = omp_get_num_threads();
int chunk = ELEMENTS/nt;
if (ELEMENTS % nt !=0) chunk++;
int start = chunk*me;
int stop = chunk*(me+1);
if (stop>ELEMENTS) stop = ELEMENTS;
#pragma omp critical
{
printf("me,nt,chunk,start,stop=%3d %3d %4d %4d %4d\n",me,nt,chunk,start,stop);
}
// MIN
// warmup
for (int i=0; i<ITERATIONS; i++) {
for (int j=start; j<stop; j++) {
{ atomic_min_i32(&i32_min,i32[j]); }
}
}
#pragma omp barrier
#pragma omp master
{
t0 = omp_get_wtime();
}
for (int i=0; i<ITERATIONS; i++) {
for (int j=start; j<stop; j++) {
{ atomic_min_i32(&i32_min,i32[j]); }
}
}
#pragma omp barrier
#pragma omp master
{
t1 = omp_get_wtime();
dt = t1 - t0;
printf("%3s: %3s=%10d took %12.7f seconds\n","i32","min",i32_min,dt);
}
// warmup
for (int i=0; i<ITERATIONS; i++) {
for (int j=start; j<stop; j++) {
{ atomic_min_i64(&i64_min,i64[j]); }
}
}
#pragma omp barrier
#pragma omp master
{
t0 = omp_get_wtime();
}
for (int i=0; i<ITERATIONS; i++) {
for (int j=start; j<stop; j++) {
{ atomic_min_i64(&i64_min,i64[j]); }
}
}
#pragma omp barrier
#pragma omp master
{
t1 = omp_get_wtime();
dt = t1 - t0;
printf("%3s: %3s=%10lld took %12.7f seconds\n","i64","min",i64_min,dt);
}
// warmup
for (int i=0; i<ITERATIONS; i++) {
for (int j=start; j<stop; j++) {
{ atomic_min_r32(&r32_min,r32[j]); }
}
}
#pragma omp barrier
#pragma omp master
{
t0 = omp_get_wtime();
}
for (int i=0; i<ITERATIONS; i++) {
for (int j=start; j<stop; j++) {
{ atomic_min_r32(&r32_min,r32[j]); }
}
}
#pragma omp barrier
#pragma omp master
{
t1 = omp_get_wtime();
dt = t1 - t0;
printf("%3s: %3s=%10.3f took %12.7f seconds\n","r32","min",r32_min,dt);
}
// warmup
for (int i=0; i<ITERATIONS; i++) {
for (int j=start; j<stop; j++) {
{ atomic_min_r64(&r64_min,r64[j]); }
}
}
#pragma omp barrier
#pragma omp master
{
t0 = omp_get_wtime();
}
for (int i=0; i<ITERATIONS; i++) {
for (int j=start; j<stop; j++) {
{ atomic_min_r64(&r64_min,r64[j]); }
}
}
#pragma omp barrier
#pragma omp master
{
t1 = omp_get_wtime();
dt = t1 - t0;
printf("%3s: %3s=%10.3f took %12.7f seconds\n","r64","min",r64_min,dt);
}
// MAX
// warmup
for (int i=0; i<ITERATIONS; i++) {
for (int j=start; j<stop; j++) {
{ atomic_max_i32(&i32_max,i32[j]); }
}
}
#pragma omp barrier
#pragma omp master
{
t0 = omp_get_wtime();
}
for (int i=0; i<ITERATIONS; i++) {
for (int j=start; j<stop; j++) {
{ atomic_max_i32(&i32_max,i32[j]); }
}
}
#pragma omp barrier
#pragma omp master
{
t1 = omp_get_wtime();
dt = t1 - t0;
printf("%3s: %3s=%10d took %12.7f seconds\n","i32","max",i32_max,dt);
}
// warmup
for (int i=0; i<ITERATIONS; i++) {
for (int j=start; j<stop; j++) {
{ atomic_max_i64(&i64_max,i64[j]); }
}
}
#pragma omp barrier
#pragma omp master
{
t0 = omp_get_wtime();
}
for (int i=0; i<ITERATIONS; i++) {
for (int j=start; j<stop; j++) {
{ atomic_max_i64(&i64_max,i64[j]); }
}
}
#pragma omp barrier
#pragma omp master
{
t1 = omp_get_wtime();
dt = t1 - t0;
printf("%3s: %3s=%10lld took %12.7f seconds\n","i64","max",i64_max,dt);
}
// warmup
for (int i=0; i<ITERATIONS; i++) {
for (int j=start; j<stop; j++) {
{ atomic_max_r32(&r32_max,r32[j]); }
}
}
#pragma omp barrier
#pragma omp master
{
t0 = omp_get_wtime();
}
for (int i=0; i<ITERATIONS; i++) {
for (int j=start; j<stop; j++) {
{ atomic_max_r32(&r32_max,r32[j]); }
}
}
#pragma omp barrier
#pragma omp master
{
t1 = omp_get_wtime();
dt = t1 - t0;
printf("%3s: %3s=%10.3f took %12.7f seconds\n","r32","max",r32_max,dt);
}
// warmup
for (int i=0; i<ITERATIONS; i++) {
for (int j=start; j<stop; j++) {
{ atomic_max_r64(&r64_max,r64[j]); }
}
}
#pragma omp barrier
#pragma omp master
{
t0 = omp_get_wtime();
}
for (int i=0; i<ITERATIONS; i++) {
for (int j=start; j<stop; j++) {
{ atomic_max_r64(&r64_max,r64[j]); }
}
}
#pragma omp barrier
#pragma omp master
{
t1 = omp_get_wtime();
dt = t1 - t0;
printf("%3s: %3s=%10.3f took %12.7f seconds\n","r64","max",r64_max,dt);
}
}
return 0;
}
|
parallelJacobi_mp.c | /************************************************************
* Program to solve a finite difference
* discretization of the screened Poisson equation:
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* with zero Dirichlet boundary condition using the iterative
* Jacobi method with overrelaxation.
*
* RHS (source) function
* f(x,y) = -alpha*(1-x^2)(1-y^2)-2*[(1-x^2)+(1-y^2)]
*
* Analytical solution to the PDE
* u(x,y) = (1-x^2)(1-y^2)
*
* Current Version: Christian Iwainsky, RWTH Aachen University
* MPI C Version: Christian Terboven, RWTH Aachen University, 2006
* MPI Fortran Version: Dieter an Mey, RWTH Aachen University, 1999 - 2005
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* Unless READ_INPUT is defined, a meaningful input dataset is used (CT).
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - constant (always greater than 0.0)
* tol - error tolerance for the iterative solver
* relax - Successive over-relaxation parameter
* mits - maximum iterations for the iterative solver
*
* On output
* : u(n,m) - Dependent variable (solution)
* : f(n,m,alpha) - Right hand side function
*
*************************************************************/
#include <math.h>
#include <mpi.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <omp.h>
/*************************************************************
* Performs one iteration of the Jacobi method and computes
* the residual value.
*
* NOTE: u(0,*), u(maxXCount-1,*), u(*,0) and u(*,maxYCount-1)
* are BOUNDARIES and therefore not part of the solution.
*************************************************************/
/**********************************************************
* Checks the error between numerical and exact solutions
**********************************************************/
double checkSolution(double xStart, double yStart,
int maxXCount, int maxYCount,
double *u,
double deltaX, double deltaY,
double alpha) {
#define U(XX, YY) u[(YY)*maxXCount + (XX)]
int x, y;
double fX, fY;
double localError, error = 0.0;
for (y = 1; y < (maxYCount - 1); y++) {
fY = yStart + (y - 1) * deltaY;
for (x = 1; x < (maxXCount - 1); x++) {
fX = xStart + (x - 1) * deltaX;
localError = U(x, y) - (1.0 - fX * fX) * (1.0 - fY * fY);
error += localError * localError;
}
}
return error;
// return sqrt(error) / ((maxXCount - 2) * (maxYCount - 2));
}
int main(int argc, char **argv) {
int n, m, mits, comm_sz, my_rank;
double alpha, tol, relax;
double maxAcceptableError;
double error;
double global_sum;
double global_error;
double *u, *u_old, *tmp;
int allocCount;
int iterationCount, maxIterationCount;
double t1, t2;
MPI_Comm initialComm;
initialComm = MPI_COMM_WORLD;
MPI_Init(NULL, NULL);
MPI_Comm_size(initialComm, &comm_sz);
//Create mpi cart
int dims[2] = {0, 0};
MPI_Dims_create(comm_sz, 2, dims);
int periods[2] = {false, false};
MPI_Comm comm;
MPI_Cart_create(initialComm, 2, dims, periods, true, &comm);
MPI_Comm_rank(comm, &my_rank);
int my_coords[2];
MPI_Cart_coords(comm, my_rank, 2, my_coords);
if (my_rank == 0) {
// printf("Input n,m - grid dimension in x,y direction:\n");
scanf("%d,%d", &n, &m);
// printf("Input alpha - Helmholtz constant:\n");
scanf("%lf", &alpha);
// printf("Input relax - successive over-relaxation parameter:\n");
scanf("%lf", &relax);
// printf("Input tol - error tolerance for the iterrative solver:\n");
scanf("%lf", &tol);
// printf("Input mits - maximum solver iterations:\n");
scanf("%d", &mits);
printf("-> rank %d : %d, %d, %g, %g, %g, %d\n", my_rank, n, m, alpha, relax, tol, mits);
allocCount = (n + 2) * (m + 2);
}
MPI_Bcast(&n, 1, MPI_INT, 0, comm); //todo: make all in parent
MPI_Bcast(&m, 1, MPI_INT, 0, comm);
MPI_Bcast(&alpha, 1, MPI_DOUBLE, 0, comm);
MPI_Bcast(&relax, 1, MPI_DOUBLE, 0, comm);
MPI_Bcast(&tol, 1, MPI_DOUBLE, 0, comm);
MPI_Bcast(&mits, 1, MPI_INT, 0, comm);
// Those two calls also zero the boundary elements
// printf("-> rank %d, %d \n", my_rank, n);
maxIterationCount = mits;
maxAcceptableError = tol;
// Solve in [-1, 1] x [-1, 1]
double xLeft = -1.0,
xRight = 1.0;
double yBottom = -1.0, yUp = 1.0;
iterationCount = 0;
error = HUGE_VAL;
clock_t start = clock(), diff;
int size_n = n / (int)sqrt(comm_sz);
int size_m = m / (int)sqrt(comm_sz);
int squareComm = (int)sqrt(comm_sz);
double deltaX = (xRight - xLeft) / (n - 1);
double deltaY = (yUp - yBottom) / (m - 1);
MPI_Barrier(comm);
t1 = MPI_Wtime();
// double xStart = xLeft + deltaX * size_n * (my_rank % (int)sqrt(comm_sz));
// double yStart = yBottom + deltaY * size_m * (((comm_sz - (int)sqrt(comm_sz)) / (int)sqrt(comm_sz)) - (my_rank / (int)sqrt(comm_sz)));
xLeft = xLeft + deltaX * size_n * (my_rank % (int)sqrt(comm_sz));
yBottom = yBottom + deltaY * size_m * (((comm_sz - (int)sqrt(comm_sz)) / (int)sqrt(comm_sz)) - (my_rank / (int)sqrt(comm_sz)));
enum directions { DOWN,
UP,
LEFT,
RIGHT };
char *neighbour_names[4] = {
"down",
"up",
"left",
"right"};
int neighbour_ranks[4];
MPI_Cart_shift(comm, 0, 1, &neighbour_ranks[LEFT], &neighbour_ranks[RIGHT]);
MPI_Cart_shift(comm, 1, 1, &neighbour_ranks[DOWN], &neighbour_ranks[UP]);
MPI_Comm_rank(comm, &my_rank);
// for (int i = 0; i < 4; i++) {
// if (neighbour_ranks[i] == MPI_PROC_NULL)
// printf("[MPI process %d] I have no %s neighbour. neighbour_ranks %d\n", my_rank, neighbour_names[i], neighbour_ranks[i]);
// else
// printf("[MPI process %d] I have a %s neighbour: process %d neighbour_ranks.\n", my_rank, neighbour_names[i], neighbour_ranks[i]);
// }
/* Iterate as long as it takes to meet the convergence criterion */
u = (double *)calloc(((size_n + 2) * (size_m + 2)), sizeof(double)); //reverse order
u_old = (double *)calloc(((size_n + 2) * (size_m + 2)), sizeof(double));
if (u == NULL || u_old == NULL) {
printf("Not enough memory for two %ix%i matrices\n", n + 2, m + 2);
exit(1);
}
// maxIterationCount
while (iterationCount < maxIterationCount && error > maxAcceptableError) {
// printf("Iteration %i", iterationCount);
#define SRC(XX, YY) u_old[(YY) * (size_n + 2) + (XX)]
#define DST(XX, YY) u[(YY) * (size_n + 2) + (XX)]
int x, y;
double fX, fY;
error = 0.0;
double updateVal;
double f;
// Coefficients
double cx = 1.0 / (deltaX * deltaX);
double cy = 1.0 / (deltaY * deltaY);
double cc = -2.0 * cx - 2.0 * cy - alpha;
int neighbourUp = my_rank - squareComm;
int neighbourDown = my_rank + squareComm;
int neighbourLeft = my_rank - 1;
int neighbourRight = my_rank + 1;
int firstColumn = (my_rank % squareComm == 0);
int lastColumn = ((my_rank + 1) % squareComm) == 0;
int firstRow = (my_rank / squareComm) == 0;
int lastRow = (my_rank / squareComm) == (squareComm - 1);
//Define Column and Row Types
MPI_Datatype column_type;
MPI_Type_vector(size_n, 1, size_n + 2, MPI_DOUBLE, &column_type);
MPI_Type_commit(&column_type);
MPI_Datatype row_type;
MPI_Type_contiguous(size_n, MPI_DOUBLE, &row_type);
MPI_Type_commit(&row_type);
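/*
 * Layout note (assuming the square decomposition used here, so
 * size_n == size_m): column_type selects one interior column of the
 * halo-padded (size_m+2) x (size_n+2) block -- size_n elements of block
 * length 1 with stride size_n+2, i.e. one padded row apart -- while
 * row_type is simply size_n contiguous doubles, one interior row.
 * The types are recreated on every iteration; hoisting them out of the
 * while loop and freeing them with MPI_Type_free would avoid leaking
 * type handles.
 */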
MPI_Request sendRequests[4];
MPI_Request receiveRequests[4];
double dataRight[size_n];
double dataLeft[size_n];
double dataUp[size_n];
double dataDown[size_n];
for(int i = 0; i < size_n; i++){
dataLeft[i] = dataRight[i] = dataUp[i] = dataDown[i] = 0.0;
}
MPI_Irecv(&dataLeft, size_n, MPI_DOUBLE, neighbour_ranks[0], 0, comm, &receiveRequests[0]);
MPI_Irecv(&dataRight, size_n, MPI_DOUBLE, neighbour_ranks[1], 0, comm, &receiveRequests[1]);
MPI_Irecv(&dataUp, size_n, MPI_DOUBLE, neighbour_ranks[2], 0, comm, &receiveRequests[2]);
MPI_Irecv(&dataDown, size_n, MPI_DOUBLE, neighbour_ranks[3], 0, comm, &receiveRequests[3]);
MPI_Isend(&(SRC(1, 1)), 1, column_type, neighbour_ranks[0], 0, comm, &sendRequests[0]);
MPI_Isend(&(SRC(size_n, 1)), 1, column_type, neighbour_ranks[1], 0, comm, &sendRequests[1]);
MPI_Isend(&(SRC(1, 1)), 1, row_type, neighbour_ranks[2], 0, comm, &sendRequests[2]);
MPI_Isend(&(SRC(1, size_n)), 1, row_type, neighbour_ranks[3], 0, comm, &sendRequests[3]);
#pragma omp parallel for num_threads(4) collapse(2) private(fX,f,fY,updateVal)\
reduction(+: error) schedule(static,1)
for (y = 2; y < (size_m); y++) { // white boxes
for (x = 2; x < (size_n); x++) {
fY = yBottom + (y - 1) * deltaY;
fX = xLeft + (x - 1) * deltaX;
f = -alpha * (1.0 - fX * fX) * (1.0 - fY * fY) - 2.0 * (1.0 - fX * fX) - 2.0 * (1.0 - fY * fY);
updateVal = ((SRC(x - 1, y) + SRC(x + 1, y)) * cx +
(SRC(x, y - 1) + SRC(x, y + 1)) * cy +
SRC(x, y) * cc - f) /
cc;
DST(x, y) = SRC(x, y) - relax * updateVal;
error += updateVal * updateVal;
// if ((my_rank == 0 && (!(y%50))&&(!(x%50)))){
// printf("y = %d x = %d updateval = %g\n",y,x,updateVal);
// }
}
}
MPI_Waitall(4, receiveRequests, MPI_STATUSES_IGNORE);
// if(!my_rank && iterationCount==10){
// for(int i = 0;i<size_n;i++){
// printf("%g %g \n",dataRight[i],SRC(size_n,i+1));
// }
// printf("\n");
// }
for (int i = 1; i < size_n + 1; i++) {
SRC(size_n + 1, i) = dataRight[i - 1];
SRC(0, i) = dataLeft[i - 1];
SRC(i, 0) = dataUp[i - 1];
SRC(i, size_n+1) = dataDown[i - 1];
}
// error = sqrt(error) / ((size_n - 2) * (size_m - 2));
// #pragma omp parallel num_threads(4)
// #pragma omp for collapse(2) private(updateVal,fY,fX,f) \
// reduction(+: error) schedule(static,1)
// if ((my_rank == 0 && (!(y%50))&&(!(x%50)))){
// thread_rank = omp_get_thread_num();
// printf("Hello from thread: %d y= %d x= %d updateval = %g\n", thread_rank,y,x,updateVal);
// }
y = 1;
fY = yBottom + (y - 1) * deltaY;
#pragma omp parallel for num_threads(4) private(fX,f,updateVal)\
reduction(+: error) schedule(static,1)
for (x = 1; x < size_n + 1; x++) {
fX = xLeft + (x - 1) * deltaX;
f = -alpha * (1.0 - fX * fX) * (1.0 - fY * fY) - 2.0 * (1.0 - fX * fX) - 2.0 * (1.0 - fY * fY);
updateVal = ((SRC(x - 1, y) + SRC(x + 1, y)) * cx +
(SRC(x, y - 1) + SRC(x, y + 1)) * cy +
SRC(x, y) * cc - f) /
cc;
DST(x, y) = SRC(x, y) - relax * updateVal;
error += updateVal * updateVal;
}
y = size_m;
fY = yBottom + (y - 1) * deltaY;
#pragma omp parallel for num_threads(4) private(fX,f,updateVal)\
reduction(+: error) schedule(static,1)
for (x = 1; x < size_n + 1; x++) {
fX = xLeft + (x - 1) * deltaX;
f = -alpha * (1.0 - fX * fX) * (1.0 - fY * fY) - 2.0 * (1.0 - fX * fX) - 2.0 * (1.0 - fY * fY);
updateVal = ((SRC(x - 1, y) + SRC(x + 1, y)) * cx +
(SRC(x, y - 1) + SRC(x, y + 1)) * cy +
SRC(x, y) * cc - f) /
cc;
DST(x, y) = SRC(x, y) - relax * updateVal;
error += updateVal * updateVal;
}
x = 1;
fX = xLeft + (x - 1) * deltaX;
#pragma omp parallel for num_threads(4) private(fY,f,updateVal)\
reduction(+: error) schedule(static,1)
for (y = 1; y < size_m + 1; y++) { // green columns
fY = yBottom + (y - 1) * deltaY;
f = -alpha * (1.0 - fX * fX) * (1.0 - fY * fY) - 2.0 * (1.0 - fX * fX) - 2.0 * (1.0 - fY * fY);
updateVal = ((SRC(x - 1, y) + SRC(x + 1, y)) * cx +
(SRC(x, y - 1) + SRC(x, y + 1)) * cy +
SRC(x, y) * cc - f) /
cc;
DST(x, y) = SRC(x, y) - relax * updateVal;
error += updateVal * updateVal;
// printf("%d column error \n", error);
}
x = size_n;
fX = xLeft + (x - 1) * deltaX;
#pragma omp parallel for num_threads(4) private(fY,f,updateVal)\
reduction(+: error) schedule(static,1)
for (y = 1; y < size_m + 1; y++) { // green columns
fY = yBottom + (y - 1) * deltaY;
f = -alpha * (1.0 - fX * fX) * (1.0 - fY * fY) - 2.0 * (1.0 - fX * fX) - 2.0 * (1.0 - fY * fY);
updateVal = ((SRC(x - 1, y) + SRC(x + 1, y)) * cx +
(SRC(x, y - 1) + SRC(x, y + 1)) * cy +
SRC(x, y) * cc - f) /
cc;
DST(x, y) = SRC(x, y) - relax * updateVal;
error += updateVal * updateVal;
// printf("%d column error \n", error);
}
MPI_Allreduce(&error, &global_sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
error = sqrt(global_sum) / (((n + 2) - 2) * ((m + 2) - 2));
MPI_Waitall(4, sendRequests, MPI_STATUSES_IGNORE);
iterationCount++;
tmp = u_old;
u_old = u;
u = tmp;
// printf("\tError %g IN IT %d\n", error,iterationCount);
}
t2 = MPI_Wtime();
printf("Iterations=%3d Elapsed MPI Wall time is %f\n", iterationCount, t2 - t1);
diff = clock() - start;
int msec = diff * 1000 / CLOCKS_PER_SEC;
printf("Time taken %d seconds %d milliseconds\n", msec / 1000, msec % 1000);
printf("Residual %g\n", error);
// u_old holds the solution after the most recent buffers swap
double absoluteError = checkSolution(xLeft, yBottom,
size_n + 2, size_m + 2,
u_old,
deltaX, deltaY,
alpha);
MPI_Allreduce(&absoluteError, &global_error, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
global_error = sqrt(global_error) / (((n + 2) - 2) * ((m + 2) - 2));
printf("The error of the iterative solution is %g\n", global_error);
MPI_Finalize();
return 0;
}
|
bml_multiply_csr_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_add.h"
#include "../bml_allocate.h"
#include "../bml_logger.h"
#include "../bml_multiply.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_add_csr.h"
#include "bml_allocate_csr.h"
#include "bml_multiply_csr.h"
#include "bml_types_csr.h"
#include "bml_setters_csr.h"
#include <complex.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Matrix multiply.
*
* \f$ C \leftarrow \alpha A \, B + \beta C \f$
*
* \ingroup multiply_group
*
* \param A Matrix A
* \param B Matrix B
* \param C Matrix C
* \param alpha Scalar factor multiplied by A * B
* \param beta Scalar factor multiplied by C
* \param threshold Used for sparse multiply
*/
void TYPED_FUNC(
bml_multiply_csr) (
bml_matrix_csr_t * A,
bml_matrix_csr_t * B,
bml_matrix_csr_t * C,
double alpha,
double beta,
double threshold)
{
double ONE = 1.0;
double ZERO = 0.0;
void *trace = NULL;
if (A == NULL || B == NULL)
{
LOG_ERROR("Either matrix A or B are NULL\n");
}
if (A == B && alpha == ONE && beta == ZERO)
{
trace = TYPED_FUNC(bml_multiply_x2_csr) (A, C, threshold);
}
else
{
bml_matrix_dimension_t matrix_dimension = { C->N_, C->N_, C->NZMAX_ };
bml_matrix_csr_t *A2 =
TYPED_FUNC(bml_noinit_matrix_csr) (matrix_dimension,
A->distribution_mode);
if (A != NULL && A == B)
{
trace = TYPED_FUNC(bml_multiply_x2_csr) (A, A2, threshold);
}
else
{
TYPED_FUNC(bml_multiply_AB_csr) (A, B, A2, threshold);
}
#ifdef DO_MPI
if (bml_getNRanks() > 1 && A2->distribution_mode == distributed)
{
bml_allGatherVParallel(A2);
}
#endif
TYPED_FUNC(bml_add_csr) (C, A2, beta, alpha, threshold);
bml_deallocate_csr(A2);
}
bml_free_memory(trace);
}
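/* Usage sketch (illustrative, not part of the library source; assumes the
 * matrices were created through the generic bml front end declared in
 * bml_multiply.h):
 *
 *     bml_multiply(A, B, C, 1.0, 0.0, 1.0e-5);  // C <- A * B, pruned at 1e-5
 *
 * The typed kernel above is normally reached through that dispatch layer
 * rather than being called directly.
 */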
/** Matrix multiply.
*
* \f$ X^{2} \leftarrow X \, X \f$
*
* \ingroup multiply_group
*
* \param X Matrix X
* \param X2 Matrix X2
* \param threshold Used for sparse multiply
*/
void *TYPED_FUNC(
bml_multiply_x2_csr) (
bml_matrix_csr_t * X,
bml_matrix_csr_t * X2,
double threshold)
{
int X_N = X->N_;
REAL_T traceX = 0.0;
REAL_T traceX2 = 0.0;
double *trace = bml_allocate_memory(sizeof(double) * 2);
#if !(defined(__IBMC__) || defined(__ibmxl__))
int ix[X_N], jx[X_N];
REAL_T x[X_N];
memset(ix, 0, X_N * sizeof(int));
memset(jx, 0, X_N * sizeof(int));
memset(x, 0, X_N * sizeof(REAL_T));
#endif
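/* ix, jx and x form a Gustavson-style sparse accumulator for one row of the
 * product: ix[k] != 0 marks column k as occupied, jx lists the occupied
 * columns in insertion order, and x holds the accumulated values. Each row
 * resets only the entries it touched, so the O(N) workspace is reused. */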
#if defined(__IBMC__) || defined(__ibmxl__)
#pragma omp parallel for \
shared(X_N) \
reduction(+: traceX, traceX2)
#else
#pragma vector aligned
#pragma omp parallel for \
shared(X_N) \
firstprivate(ix,jx, x) \
reduction(+: traceX, traceX2)
#endif
for (int i = 0; i < X_N; i++) // CALCULATES THRESHOLDED X^2
{
#if defined(__IBMC__) || defined(__ibmxl__)
int ix[X_N], jx[X_N];
REAL_T x[X_N];
memset(ix, 0, X_N * sizeof(int));
#endif
int *icols = X->data_[i]->cols_;
REAL_T *ivals = (REAL_T *) X->data_[i]->vals_;
const int innz = X->data_[i]->NNZ_;
int l = 0;
for (int ipos = 0; ipos < innz; ipos++)
{
REAL_T a = ivals[ipos];
const int j = icols[ipos];
if (j == i)
{
traceX = traceX + a;
}
const int jnnz = X->data_[j]->NNZ_;
REAL_T *jvals = (REAL_T *) X->data_[j]->vals_;
int *jcols = X->data_[j]->cols_;
for (int jpos = 0; jpos < jnnz; jpos++)
{
const int k = jcols[jpos];
if (ix[k] == 0)
{
x[k] = 0.0;
jx[l] = k;
ix[k] = i + 1;
l++;
}
// TEMPORARY STORAGE VECTOR LENGTH FULL N
x[k] = x[k] + a * jvals[jpos];
}
}
// clear row
TYPED_FUNC(csr_clear_row) (X2->data_[i]);
for (int j = 0; j < l; j++)
{
int jp = jx[j];
REAL_T xtmp = x[jp];
if (jp == i)
{
traceX2 = traceX2 + xtmp;
TYPED_FUNC(csr_set_row_element_new) (X2->data_[i], jp, &xtmp);
}
else if (is_above_threshold(xtmp, threshold))
{
TYPED_FUNC(csr_set_row_element_new) (X2->data_[i], jp, &xtmp);
}
// reset
ix[jp] = 0;
x[jp] = 0.0;
}
}
trace[0] = traceX;
trace[1] = traceX2;
return trace;
}
/** Matrix multiply.
*
* \f$ C \leftarrow A \, B \f$
*
* \ingroup multiply_group
*
* \param A Matrix A
* \param B Matrix B
* \param C Matrix C
* \param threshold Used for sparse multiply
*/
void TYPED_FUNC(
bml_multiply_AB_csr) (
bml_matrix_csr_t * A,
bml_matrix_csr_t * B,
bml_matrix_csr_t * C,
double threshold)
{
const int A_N = A->N_;
const int C_N = C->N_;
#if !(defined(__IBMC__) || defined(__ibmxl__))
int ix[C_N], jx[C_N];
REAL_T x[C_N];
memset(ix, 0, C_N * sizeof(int));
memset(jx, 0, C_N * sizeof(int));
memset(x, 0, C_N * sizeof(REAL_T));
#endif
#if defined(__IBMC__) || defined(__ibmxl__)
#pragma omp parallel for \
shared(A_N, C_N)
#else
#pragma omp parallel for \
firstprivate(ix, jx, x)
#endif
for (int i = 0; i < A_N; i++)
{
#if defined(__IBMC__) || defined(__ibmxl__)
int ix[C_N], jx[C_N];
REAL_T x[C_N];
memset(ix, 0, C_N * sizeof(int));
#endif
int *acols = A->data_[i]->cols_;
REAL_T *avals = (REAL_T *) A->data_[i]->vals_;
const int annz = A->data_[i]->NNZ_;
int l = 0;
for (int pos = 0; pos < annz; pos++)
{
REAL_T a = avals[pos];
const int j = acols[pos];
const int bnnz = B->data_[j]->NNZ_;
REAL_T *bvals = (REAL_T *) B->data_[j]->vals_;
int *bcols = B->data_[j]->cols_;
for (int bpos = 0; bpos < bnnz; bpos++)
{
const int k = bcols[bpos];
if (ix[k] == 0)
{
x[k] = 0.0;
jx[l] = k;
ix[k] = i + 1;
l++;
}
// TEMPORARY STORAGE VECTOR LENGTH FULL N
x[k] = x[k] + a * bvals[bpos];
}
}
// clear row
TYPED_FUNC(csr_clear_row) (C->data_[i]);
for (int j = 0; j < l; j++)
{
int jp = jx[j];
REAL_T xtmp = x[jp];
if (jp == i)
{
TYPED_FUNC(csr_set_row_element_new) (C->data_[i], jp, &xtmp);
}
else if (is_above_threshold(xtmp, threshold))
{
TYPED_FUNC(csr_set_row_element_new) (C->data_[i], jp, &xtmp);
}
// reset
ix[jp] = 0;
x[jp] = 0.0;
}
}
}
/* Not sure why we have this routine (for ellpack, ellblock, etc). Using default here,
* with no threshold adjustment.
*/
/** Matrix multiply with threshold adjustment.
*
* \f$ C \leftarrow A \, B \f$
*
* \ingroup multiply_group
*
* \param A Matrix A
* \param B Matrix B
* \param C Matrix C
* \param threshold Used for sparse multiply
*/
void TYPED_FUNC(
bml_multiply_adjust_AB_csr) (
bml_matrix_csr_t * A,
bml_matrix_csr_t * B,
bml_matrix_csr_t * C,
double threshold)
{
TYPED_FUNC(bml_multiply_AB_csr) (A, B, C, threshold);
}
|
test6.c | int g1;
void foo () {
g1=0;
#pragma omp barrier
1+g1;
}
int main() {
#pragma omp parallel
{
int p;
g1=2;
if (3) {
p=4;
g1 = 10;
foo ();
5+g1;
} else {
p=6+g1;
//#pragma omp atomic read
// p = g1;
#pragma omp barrier
g1=7;
}
if (8) {
9+g1;
foo();
g1=10+g1;
} else {
11+g1;
#pragma omp barrier
12+g1;
}
g1;
13;
}
}
|
GB_binop__isle_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__isle_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__isle_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_uint8)
// A*D function (colscale): GB (_AxD__isle_uint8)
// D*A function (rowscale): GB (_DxB__isle_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_uint8)
// C=scalar+B GB (_bind1st__isle_uint8)
// C=scalar+B' GB (_bind1st_tran__isle_uint8)
// C=A+scalar GB (_bind2nd__isle_uint8)
// C=A'+scalar GB (_bind2nd_tran__isle_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_UINT8 || GxB_NO_ISLE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isle_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isle_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isle_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isle_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isle_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isle_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__isle_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isle_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__isle_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isle_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isle_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = Bx [p] ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isle_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = Ax [p] ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__isle_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__isle_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
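/* Usage sketch (illustrative, not part of this generated file): this kernel
 * family is reached through the user-level GraphBLAS API, for example
 *
 *     // C = (A <= B) on the intersection pattern, C/A/B of type GrB_UINT8
 *     GrB_eWiseMult (C, NULL, NULL, GxB_ISLE_UINT8, A, B, NULL) ;
 */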
|
GB_unaryop__lnot_uint8_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint8_int16
// op(A') function: GB_tran__lnot_uint8_int16
// C type: uint8_t
// A type: int16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint8_int16
(
uint8_t *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint8_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
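/* Usage sketch (illustrative, not part of this generated file): this kernel
 * is selected when GrB_apply combines the GxB_LNOT_UINT8 operator with an
 * INT16 input, typecasting each entry before the logical negation:
 *
 *     GrB_Matrix_apply (C, NULL, NULL, GxB_LNOT_UINT8, A, NULL) ;
 */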
|
AveragePooling.h | // --------------------------------------------------------------------------
// Binary Brain -- binary neural net framework
//
// Copyright (C) 2018 by Ryuji Fuchikami
// https://github.com/ryuz
// ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------
#pragma once
#include <vector>
#include <random>
#include "bb/Filter2d.h"
namespace bb {
// AveragePooling class
template <typename FT = float, typename BT = float>
class AveragePooling : public Filter2d<FT, BT>
{
using _super = Filter2d<FT, BT>;
protected:
bool m_host_only = false;
index_t m_filter_h_size;
index_t m_filter_w_size;
index_t m_input_w_size;
index_t m_input_h_size;
index_t m_input_c_size;
index_t m_output_w_size;
index_t m_output_h_size;
index_t m_output_c_size;
indices_t m_input_shape;
indices_t m_output_shape;
FrameBuffer m_x;
FrameBuffer m_y;
FrameBuffer m_dx;
protected:
AveragePooling() {}
/**
 * @brief command processing
 * @detail handles a command passed to this model
 * @param args command tokens
 */
void CommandProc(std::vector<std::string> args)
{
_super::CommandProc(args);
// host-only mode setting
if (args.size() == 2 && args[0] == "host_only")
{
m_host_only = EvalBool(args[1]);
}
}
public:
~AveragePooling() {}
static std::shared_ptr<AveragePooling> Create(index_t filter_h_size, index_t filter_w_size)
{
auto self = std::shared_ptr<AveragePooling>(new AveragePooling);
self->m_filter_h_size = filter_h_size;
self->m_filter_w_size = filter_w_size;
return self;
}
std::string GetModelName(void) const { return "AveragePooling"; }
index_t GetFilterHeight(void) const override { return m_filter_h_size; }
index_t GetFilterWidth(void) const override { return m_filter_w_size; }
/**
 * @brief set the input shape
 * @detail Sets the input shape and initializes the internal state so that
 * GetOutputShape() returns a valid value from then on.
 * Passing the shape that is already set leaves the state unchanged
 * (see the early return below).
 * @param shape shape of the nodes making up one frame
 * @return the resulting output shape
 */
indices_t SetInputShape(indices_t shape)
{
// nothing to do if this shape is already set
if ( shape == this->GetInputShape() ) {
return this->GetOutputShape();
}
BB_ASSERT(shape.size() == 3);
m_input_w_size = shape[0];
m_input_h_size = shape[1];
m_input_c_size = shape[2];
m_output_w_size = (m_input_w_size + m_filter_w_size - 1) / m_filter_w_size;
m_output_h_size = (m_input_h_size + m_filter_h_size - 1) / m_filter_h_size;
m_output_c_size = m_input_c_size;
m_input_shape = shape;
m_output_shape = indices_t({m_output_w_size, m_output_h_size, m_output_c_size});
return m_output_shape;
}
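// Worked example: input shape {28, 28, 3} with a 2x2 filter gives
// {(28 + 2 - 1) / 2, (28 + 2 - 1) / 2, 3} = {14, 14, 3}; the ceiling
// division keeps a partially covered border row/column as an extra output.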
/**
 * @brief get the input shape
 * @detail Returns the input shape.
 * @return the input shape
 */
indices_t GetInputShape(void) const
{
return m_input_shape;
}
/**
 * @brief get the output shape
 * @detail Returns the output shape.
 * @return the output shape
 */
indices_t GetOutputShape(void) const
{
return m_output_shape;
}
protected:
/*
inline void* GetInputPtr(NeuralNetBuffer<T>& buf, int c, int y, int x)
{
return buf.Lock((c*m_input_h_size + y)*m_input_w_size + x);
}
inline void* GetOutputPtr(NeuralNetBuffer<T>& buf, int c, int y, int x)
{
return buf.Lock((c*m_output_h_size + y)*m_output_w_size + x);
}
*/
inline index_t GetInputNode(index_t c, index_t y, index_t x)
{
return (c * m_input_h_size + y) * m_input_w_size + x;
}
inline index_t GetOutputNode(index_t c, index_t y, index_t x)
{
return (c * m_output_h_size + y) * m_output_w_size + x;
}
public:
FrameBuffer Forward(FrameBuffer x, bool train = true)
{
BB_ASSERT(x.GetType() == DataType<FT>::type);
// save for backward
m_x = x;
// set the input shape on the first call if SetInputShape was not called beforehand
if (m_x.GetShape() != m_input_shape) {
SetInputShape(m_x.GetShape());
}
// allocate the output
m_y.Resize(DataType<FT>::type, m_x.GetFrameSize(), m_output_shape);
#if 0 // #ifdef BB_WITH_CUDA
// CUDA version
if ( DataType<FT>::type == BB_TYPE_FP32 && !m_host_only && m_x.IsDeviceAvailable() && m_y.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
auto ptr_x = x.LockDeviceMemoryConst();
auto ptr_y = m_y.LockDeviceMemory(true);
bbcu_fp32_MaxPooling_Forward
(
(float const *)ptr_x.GetAddr(),
(float* )ptr_y.GetAddr(),
(int )m_filter_h_size,
(int )m_filter_w_size,
(int )m_input_w_size,
(int )m_input_h_size,
(int )m_output_w_size,
(int )m_output_h_size,
(int )m_output_c_size,
(int )m_y.GetFrameSize(),
(int )(m_y.GetFrameStride() / sizeof(float))
);
return m_y;
}
#endif
#if 0
if ( DataType<FT>::type == BB_TYPE_BIT ) {
// binary (Bit) implementation
auto x_ptr = m_x.LockConst<FT>();
auto y_ptr = m_y.Lock<FT>(true);
index_t m256_frame_size = (int)m_y.GetFrameStride() / 32;
#pragma omp parallel for
for (index_t c = 0; c < m_input_c_size; ++c) {
for (index_t y = 0; y < m_output_h_size; ++y) {
for (index_t x = 0; x < m_output_w_size; ++x) {
__m256i *y_addr = (__m256i *)y_ptr.GetAddr(GetOutputNode(c, y, x));
for (index_t frame = 0; frame < m256_frame_size; ++frame) {
__m256i max_val = _mm256_set1_epi8(0);
for (index_t fy = 0; fy < m_filter_h_size; ++fy) {
index_t iy = y*m_filter_h_size + fy;
if ( iy < m_input_h_size ) {
for (index_t fx = 0; fx < m_filter_w_size; ++fx) {
index_t ix = x*m_filter_w_size + fx;
if ( ix < m_input_w_size ) {
__m256i const *x_addr = (__m256i const *)x_ptr.GetAddr(GetInputNode(c, iy, ix));
__m256i in_sig = _mm256_load_si256(&x_addr[frame]);
max_val = _mm256_or_si256(max_val, in_sig);
}
}
}
}
_mm256_store_si256(&y_addr[frame], max_val);
}
}
}
}
return m_y;
}
#endif
// float implementation
if ( DataType<FT>::type == BB_TYPE_FP32 ) {
auto x_ptr = m_x.LockConst<FT>();
auto y_ptr = m_y.Lock<FT>(true);
index_t m256_frame_size = (int)m_y.GetFrameStride() / sizeof(float);
__m256 frac = _mm256_set1_ps(1.0f / (m_filter_h_size * m_filter_w_size));
#pragma omp parallel for
for (index_t c = 0; c < m_input_c_size; ++c) {
for (index_t y = 0; y < m_output_h_size; ++y) {
for (index_t x = 0; x < m_output_w_size; ++x) {
float *y_addr = (float *)y_ptr.GetAddr(GetOutputNode(c, y, x));
for (index_t frame = 0; frame < m256_frame_size; frame += 8) {
__m256 sum_val = _mm256_set1_ps(0.0f); // start the sum at zero (the preceding activation makes 0 the minimum anyway)
for (index_t fy = 0; fy < m_filter_h_size; ++fy) {
index_t iy = y*m_filter_h_size + fy;
if ( iy < m_input_h_size ) {
for (index_t fx = 0; fx < m_filter_w_size; ++fx) {
index_t ix = x*m_filter_w_size + fx;
if ( ix < m_input_w_size ) {
float const *x_addr = (float const *)x_ptr.GetAddr(GetInputNode(c, iy, ix));
__m256 in_sig = _mm256_load_ps(&x_addr[frame]);
sum_val = _mm256_add_ps(sum_val, in_sig);
}
}
}
}
__m256 ave_val = _mm256_mul_ps(sum_val, frac);
_mm256_store_ps(&y_addr[frame], ave_val);
}
}
}
}
return m_y;
}
// generic implementation
{
auto x_ptr = m_x.LockConst<FT>();
auto y_ptr = m_y.Lock<FT>(true);
auto frame_size = m_x.GetFrameSize();
#pragma omp parallel for
for (index_t c = 0; c < m_input_c_size; ++c) {
for (index_t y = 0; y < m_output_h_size; ++y) {
for (index_t x = 0; x < m_output_w_size; ++x) {
for (index_t frame = 0; frame < frame_size; ++frame) {
FT sum_val = 0;
for (index_t fy = 0; fy < m_filter_h_size; ++fy) {
index_t iy = y*m_filter_h_size + fy;
if ( iy < m_input_h_size ) {
for (index_t fx = 0; fx < m_filter_w_size; ++fx) {
index_t ix = x*m_filter_w_size + fx;
if ( ix < m_input_w_size ) {
FT in_sig = x_ptr.Get(frame, {ix, iy, c});
sum_val += in_sig;
}
}
}
}
y_ptr.Set(frame, {x, y, c}, sum_val / (m_filter_h_size * m_filter_w_size));
}
}
}
}
return m_y;
}
}
FrameBuffer Backward(FrameBuffer dy)
{
if (dy.Empty()) {
return dy;
}
BB_ASSERT(dy.GetType() == DataType<BT>::type);
m_dx.Resize(DataType<BT>::type, dy.GetFrameSize(), m_input_shape);
#if 0 // #ifdef BB_WITH_CUDA
if ( DataType<BT>::type == BB_TYPE_FP32 && DataType<FT>::type == BB_TYPE_FP32 && !m_host_only
&& m_x.IsDeviceAvailable() && m_y.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
// CUDA version
auto ptr_x = m_x.LockDeviceMemoryConst();
auto ptr_y = m_y.LockDeviceMemoryConst();
auto ptr_dy = dy.LockDeviceMemoryConst();
auto ptr_dx = m_dx.LockDeviceMemory(true);
bbcu_fp32_MaxPooling_Backward
(
(float const *)ptr_x.GetAddr(),
(float const *)ptr_y.GetAddr(),
(float const *)ptr_dy.GetAddr(),
(float* )ptr_dx.GetAddr(),
(int )m_filter_h_size,
(int )m_filter_w_size,
(int )m_input_w_size,
(int )m_input_h_size,
(int )m_output_w_size,
(int )m_output_h_size,
(int )m_output_c_size,
(int )m_y.GetFrameSize(),
(int )(m_y.GetFrameStride() / sizeof(float))
);
return m_dx;
}
#endif
if ( DataType<BT>::type == BB_TYPE_FP32 && DataType<FT>::type == BB_TYPE_FP32 ) {
// float implementation
index_t m256_frame_size = m_dx.GetFrameStride() / sizeof(float);
m_dx.FillZero();
auto x_ptr = m_x.LockConst<FT>();
auto y_ptr = m_y.LockConst<FT>();
auto dy_ptr = dy.LockConst<BT>();
auto dx_ptr = m_dx.Lock<BT>(true);
__m256 frac = _mm256_set1_ps(1.0f / (m_filter_h_size * m_filter_w_size));
#pragma omp parallel for
for (index_t n = 0; n < m_input_c_size; ++n) {
for (index_t y = 0; y < m_output_h_size; ++y) {
for (index_t x = 0; x < m_output_w_size; ++x) {
float const * y_addr = (float const *)y_ptr.GetAddr(GetOutputNode(n, y, x));
float const * dy_addr = (float const *)dy_ptr.GetAddr(GetOutputNode(n, y, x));
for (index_t frame = 0; frame < m256_frame_size; frame += 8) {
__m256 out_sig = _mm256_load_ps(&y_addr[frame]);
__m256 out_grad = _mm256_load_ps(&dy_addr[frame]);
__m256 in_grad = _mm256_mul_ps(frac, out_grad);
for (index_t fy = 0; fy < m_filter_h_size; ++fy) {
index_t iy = y*m_filter_h_size + fy;
if ( iy < m_input_h_size ) {
for (index_t fx = 0; fx < m_filter_w_size; ++fx) {
index_t ix = x*m_filter_w_size + fx;
if ( ix < m_input_w_size ) {
float *dx_addr = (float *)dx_ptr.GetAddr(GetInputNode(n, iy, ix));
__m256 dx = _mm256_load_ps(&dx_addr[frame]);
dx = _mm256_add_ps(dx, in_grad);
_mm256_store_ps(&dx_addr[frame], dx);
}
}
}
}
}
}
}
}
return m_dx;
}
// generic implementation
{
m_dx.FillZero();
auto x_ptr = m_x.LockConst<FT>();
auto y_ptr = m_y.LockConst<FT>();
auto dy_ptr = dy.LockConst<BT>();
auto dx_ptr = m_dx.Lock<BT>(true);
auto frame_size = m_x.GetFrameSize();
#pragma omp parallel for
for (index_t c = 0; c < m_input_c_size; ++c) {
for (index_t y = 0; y < m_output_h_size; ++y) {
for (index_t x = 0; x < m_output_w_size; ++x) {
for (index_t frame = 0; frame < frame_size; ++frame) {
BT grad = dy_ptr.Get(frame, {x, y, c});
for (index_t fy = 0; fy < m_filter_h_size; ++fy) {
index_t iy = y*m_filter_h_size + fy;
if ( iy < m_input_h_size ) {
for (index_t fx = 0; fx < m_filter_w_size; ++fx) {
index_t ix = x*m_filter_w_size + fx;
if ( ix < m_input_w_size ) {
dx_ptr.Add(frame, {ix, iy, c}, grad / (m_filter_h_size * m_filter_w_size));
}
}
}
}
}
}
}
}
return m_dx;
}
}
};
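// Usage sketch (illustrative, assuming the surrounding BinaryBrain API):
//
//     auto pool = bb::AveragePooling<float>::Create(2, 2);
//     pool->SetInputShape({28, 28, 32});
//     bb::FrameBuffer y = pool->Forward(x); // {28,28,32} -> {14,14,32}
//     bb::FrameBuffer dx = pool->Backward(dy);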
} |
mxnet_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "./operator_tune.h"
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif // __CUDACC__
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
template<typename xpu>
int get_num_threads(const int N);
#ifdef __CUDACC__
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
inline cudaDeviceProp cuda_get_device_prop() {
int device;
CUDA_CALL(cudaGetDevice(&device));
cudaDeviceProp deviceProp;
CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
return deviceProp;
}
/*!
* \brief Get the number of blocks for cuda kernel given N
*/
inline int cuda_get_num_blocks(const int N) {
using namespace mshadow::cuda;
return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}
template<>
inline int get_num_threads<gpu>(const int N) {
using namespace mshadow::cuda;
return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif // __CUDACC__
template<>
inline int get_num_threads<cpu>(const int N) {
return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}
/*! \brief operator request type switch */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
/*! \brief operator request type switch */
#define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
{ \
const OpReqType ReqType = kNullOp; \
{__VA_ARGS__} \
} \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
#define MXNET_NDIM_SWITCH(NDim, ndim, ...) \
if (NDim == 0) { \
} else if (NDim == 1) { \
const int ndim = 1; \
{__VA_ARGS__} \
} else if (NDim == 2) { \
const int ndim = 2; \
{__VA_ARGS__} \
} else if (NDim == 3) { \
const int ndim = 3; \
{__VA_ARGS__} \
} else if (NDim == 4) { \
const int ndim = 4; \
{__VA_ARGS__} \
} else if (NDim == 5) { \
const int ndim = 5; \
{__VA_ARGS__} \
} else { \
LOG(FATAL) << "ndim=" << NDim << "too large "; \
}
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
LOG(FATAL) << "This operation does not " \
"support float16"; \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
template <typename T>
struct AccType {
using type = T;
};
template <>
struct AccType<mshadow::half::half_t> {
using type = float;
};
#define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
typedef float AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
typedef uint8_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types not uint8"; \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
typedef int8_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types not int8"; \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
typedef int32_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types, not int32"; \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
typedef int64_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types, not int64"; \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
typedef float AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
typedef uint32_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
typedef int32_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
typedef int64_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
typedef int64_t AType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_INT_TYPE_SWITCH(type, DType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float32"; \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float64"; \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float16"; \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Invalid loading enum type " << type; \
}
/*!
* \brief assign the val to out according
* to request in Kernel::Launch
* \param out the data to be assigned
* \param req the assignment request
* \param val the value to be assigned to out
* \tparam OType output type
* \tparam VType value type
*/
#define KERNEL_ASSIGN(out, req, val) \
{ \
switch (req) { \
case kNullOp: \
break; \
case kWriteTo: \
case kWriteInplace: \
(out) = (val); \
break; \
case kAddTo: \
(out) += (val); \
break; \
default: \
break; \
} \
}
#define MXNET_ADD_ALL_TYPES \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64)
/* \brief Compute flattened index given coordinates and shape. */
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
index_t ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
}
return ret;
}
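/* Example: for shape (2,3,4) and coord (1,2,3), ravel computes
 * ((1*3) + 2)*4 + 3 = 23, the row-major linear index. */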
/* Compute coordinates from flattened index given shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
Shape<ndim> ret;
#pragma unroll
for (index_t i = ndim-1, j = idx; i >=0; --i) {
auto tmp = j / shape[i];
ret[i] = j - tmp*shape[i];
j = tmp;
}
return ret;
}
/* Compute dot product of two vector */
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
index_t ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
ret += coord[i] * stride[i];
}
return ret;
}
/* Combining unravel and dot */
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
const Shape<ndim>& stride) {
index_t ret = 0;
#pragma unroll
for (index_t i = ndim-1, j = idx; i >=0; --i) {
auto tmp = j / shape[i];
ret += (j - tmp*shape[i])*stride[i];
j = tmp;
}
return ret;
}
/* Calculate stride of each dim from shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
Shape<ndim> stride;
index_t cumprod = 1;
#pragma unroll
for (int i = ndim - 1; i >= 0; --i) {
stride[i] = (shape[i] > 1) ? cumprod : 0;
cumprod *= shape[i];
}
return stride;
}
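/* Broadcast dimensions (extent 1) get stride 0 above, so dot()/unravel_dot()
 * can index a broadcast operand with coordinates of the larger tensor.
 * Example: shape (3,1,4) -> stride (4,0,1). */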
/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx, const Shape<ndim>& stride) {
++(*coord)[ndim-1];
*idx += stride[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
(*coord)[i] -= shape[i];
++(*coord)[i-1];
*idx = *idx + stride[i-1] - shape[i] * stride[i];
}
}
/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx1, const Shape<ndim>& stride1,
index_t* idx2, const Shape<ndim>& stride2) {
++(*coord)[ndim-1];
*idx1 += stride1[ndim-1];
*idx2 += stride2[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
(*coord)[i] -= shape[i];
++(*coord)[i-1];
*idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
*idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
}
}
/*!
* \brief Simple copy data from one blob to another
* \param to Destination blob
* \param from Source blob
*/
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
CHECK_EQ(from.Size(), to.Size());
CHECK_EQ(from.dev_mask(), to.dev_mask());
MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
if (to.type_flag_ == from.type_flag_) {
mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
} else {
MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, {
to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
})
}
})
}
/*! \brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
/* \brief Backward calc with grad
* \param a - output grad
* \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
* \return input grad
*/
template<typename DType, typename ...Args>
MSHADOW_XINLINE static DType Map(DType a, Args... args) {
return DType(a * GRAD_OP::Map(args...));
}
};
/*! \brief Binary op backward gradient OP wrapper (tuned) */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
using backward_grad<GRAD_OP>::Map;
};
/*! \brief Select assignment operation based upon the req value
* Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
*/
template<typename OP, int req>
struct op_with_req {
typedef OP Operation;
/*! \brief input is one tensor */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
}
/*! \brief inputs are two tensors */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief input is tensor and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
}
/*! \brief input is tensor and two scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in,
const DType value_1, const DType value_2) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
}
/*! \brief No inputs (ie fill to constant value) */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out) {
KERNEL_ASSIGN(out[i], req, OP::Map());
}
/*! \brief input is single scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(value));
}
/*! \brief inputs are two tensors and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out,
const DType *input_1, const DType *input_2, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
}
/*! \brief inputs are three tensors (ie backward grad with binary grad function) */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out,
const DType *input_1,
const DType *input_2,
const DType *input_3) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
}
};
template<typename OP, typename xpu>
struct Kernel;
/*!
* \brief CPU Kernel launcher
* \tparam OP Operator to launch
*/
template<typename OP>
struct Kernel<OP, cpu> {
/*!
* \brief Launch a generic CPU kernel.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename ...Args>
inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2) {
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
OP::Map(i, args...);
}
}
#else
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
return true;
}
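/* Typical call site (a sketch): pair an operator's Map with op_with_req and
 * launch it over N elements,
 *
 *   Kernel<op_with_req<OP, kWriteTo>, cpu>::Launch(s, N, out_ptr, in_ptr);
 *
 * which applies OP::Map elementwise, serially or under OpenMP as chosen
 * above. */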
/*!
* \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
* for irregular workloads such as spmv.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename ...Args>
inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
if (omp_threads < 2) {
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
}
#else
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
return true;
}
/*!
* \brief Launch CPU kernel which has OMP tuning data available.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam PRIMITIVE_OP The primitive operation to use for tuning
* \tparam DType Data type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param dest Destination pointer (used to infer DType)
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename PRIMITIVE_OP, typename DType, typename ...Args>
static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
N, static_cast<size_t>(omp_threads))) {
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
OP::Map(i, args...);
}
}
#else
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
}
/*!
* \brief Launch custom-tuned kernel where each thread is set to
* operate on a contiguous partition
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
*/
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2) {
OP::Map(0, N, args...);
} else {
const auto length = (N + omp_threads - 1) / omp_threads;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
OP::Map(i, i + length > N ? N - i : length, args...);
}
}
#else
OP::Map(0, N, args...);
#endif
}
/*!
* \brief Launch a tunable OP with implicitly-supplied data type
* \tparam DType Data type
* \tparam T OP type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param s Stream (usually null for CPU)
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
* \return Always true
*/
template<typename DType, typename T = OP, typename ...Args>
static MSHADOW_CINLINE
typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
LaunchTuned<T, DType>(s, N, dest, args...);
return true;
}
/*!
* \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
* \tparam DType Data type
* \tparam T Wrapper type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param s Stream (usually null for CPU)
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
* \return Always true
*/
template<typename DType, typename T = OP, typename ...Args>
static MSHADOW_CINLINE
typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
return true;
}
};
#ifdef __CUDACC__
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, args...);
}
}
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, 1, args...);
}
}
template<typename OP>
struct Kernel<OP, gpu> {
/*! \brief Launch GPU kernel */
template<typename ...Args>
inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
if (0 == N) return;
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
}
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
if (0 == N) return;
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel_ex<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
}
};
#endif // __CUDACC__
/*!
* \brief Set to immediate scalar value kernel
* \tparam val Scalar immediate
*/
template<int val>
struct set_to_int : public tunable {
// mxnet_op version (when used directly with Kernel<>::Launch()) */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out) {
out[i] = DType(val);
}
// mshadow_op version (when used with op_with_req<>)
MSHADOW_XINLINE static int Map() {
return val;
}
};
/*!
* \brief Special-case kernel shortcut for setting to zero and one
*/
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
} // namespace mxnet_op
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
|
Example_reduction.3.c | /*
* @@name: reduction.3c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: rt-error
*/
#include <stdio.h>
int main (void)
{
int a, i;
#pragma omp parallel shared(a) private(i)
{
#pragma omp master
a = 0;
// To avoid race conditions, add a barrier here: "omp master" has no implied
// barrier, so other threads may enter the reduction loop before the master
// initializes a.
#pragma omp for reduction(+:a)
for (i = 0; i < 10; i++) {
a += i;
}
#pragma omp single
printf ("Sum is %d\n", a);
}
return 0;
}
|
functions.h | #ifndef __FUNCTIONS_H__
#define __FUNCTIONS_H__
#include "../scaling/scaling.h"
#include "../summation/summation.h"
#include "../contraction/contraction.h"
namespace CTF {
/**
* @defgroup CTF_func CTF functions
* \brief user-defined function interface
* @addtogroup CTF_func
* @{
*/
class Idx_Tensor;
/**
* \brief custom scalar function on tensor: e.g. A["ij"] = f(A["ij"])
*/
template<typename dtype=double>
class Endomorphism : public CTF_int::endomorphism {
public:
/**
* \brief function signature for element-wise operation a=f(a)
*/
//dtype (*f)(dtype);
std::function<void(dtype&)> f;
/**
* \brief constructor takes function pointer
* \param[in] f_ scalar function: (type) -> (type)
*/
Endomorphism(std::function<void(dtype&)> f_){ f = f_; }
/**
* \brief default constructor
*/
Endomorphism(){}
/**
* \brief apply function f to value stored at a
* \param[in,out] a pointer to operand that will be cast to dtype
* is set to result of applying f on value at a
*/
void apply_f(char * a) const { f(((dtype*)a)[0]); }
};
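/* Usage sketch (illustrative): wrap a lambda so the CTF machinery can apply
 * it elementwise, e.g.
 *
 *   CTF::Endomorphism<double> square([](double & a){ a = a * a; });
 */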
/**
* \brief custom function f : X -> Y to be applied to tensor elements:
* e.g. B["ij"] = f(A["ij"])
*/
template<typename dtype_A=double, typename dtype_B=dtype_A>
class Univar_Function : public CTF_int::univar_function {
public:
/**
* \brief function signature for an element-wise unary map, compute b=f(a)
*/
//dtype_B (*f)(dtype_A);
std::function<dtype_B(dtype_A)> f;
/**
* \brief constructor takes function pointers to compute B=f(A));
* \param[in] f_ linear function (type_A)->(type_B)
*/
Univar_Function(std::function<dtype_B(dtype_A)> f_){ f = f_; }
/**
* \brief apply function f to value stored at a
* \param[in] a pointer to operand that will be cast to dtype
* \param[in,out] b location where the result f(*a), possibly of a different type, is stored
*/
void apply_f(char const * a, char * b) const { ((dtype_B*)b)[0]=f(((dtype_A*)a)[0]); }
/**
* \brief compute b = b+f(a)
      * \param[in] a pointer to operand that will be cast to dtype_A
      * \param[in,out] b accumulator to which f(*a) is added
* \param[in] sr_B algebraic structure for b, needed to do add
*/
void acc_f(char const * a, char * b, CTF_int::algstrct const * sr_B) const {
dtype_B tb=f(((dtype_A*)a)[0]);
sr_B->add(b, (char const *)&tb, b);
}
};
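  /* Usage sketch (illustrative; assumes tensors A and B with matching shapes):
   *   CTF::Function<double,double> f([](double a){ return 2.*a; });
   *   B["ij"] = f(A["ij"]); // elementwise b = f(a)
   */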
/**
  * \brief custom function f : (X * Y) -> Y applied on two tensors as summation:
* e.g. B["ij"] = f(A["ij"],B["ij"])
*/
template<typename dtype_A=double, typename dtype_B=dtype_A>
class Univar_Transform : public CTF_int::univar_function {
public:
/**
      * \brief function signature for element-wise transform f(a,b), which updates b in place
*/
//void (*f)(dtype_A, dtype_B &);
std::function<void(dtype_A, dtype_B &)> f;
/**
      * \brief constructor takes function pointer for the transform f(a,b)
      * \param[in] f_ transform function (type_A, type_B&)
*/
Univar_Transform(std::function<void(dtype_A, dtype_B &)> f_){ f = f_; }
/**
* \brief apply function f to value stored at a, for an accumulator, this is the same as acc_f below
* \param[in] a pointer to operand that will be cast to dtype
      * \param[in,out] b operand of type dtype_B that f updates in place
*/
void apply_f(char const * a, char * b) const { acc_f(a,b,NULL); }
/**
* \brief compute f(a,b)
      * \param[in] a pointer to the input operand
* \param[in,out] b value that is accumulated to
* \param[in] sr_B algebraic structure for b, here is ignored
*/
void acc_f(char const * a, char * b, CTF_int::algstrct const * sr_B) const {
f(((dtype_A*)a)[0], ((dtype_B*)b)[0]);
}
bool is_accumulator() const { return true; }
};
/**
* \brief custom bivariate function on two tensors:
* e.g. C["ij"] = f(A["ik"],B["kj"])
*/
template<typename dtype_A=double, typename dtype_B=dtype_A, typename dtype_C=dtype_A>
class Bivar_Function : public CTF_int::bivar_function {
public:
/**
* \brief function signature for element-wise multiplication, compute C=f(A,B)
*/
//dtype_C (*f)(dtype_A, dtype_B);
std::function<dtype_C (dtype_A, dtype_B)> f;
/**
* \brief constructor takes function pointers to compute C=f(A,B);
* \param[in] f_ bivariate function (type_A,type_B)->(type_C)
*/
Bivar_Function(std::function<dtype_C (dtype_A, dtype_B)> f_)
: CTF_int::bivar_function(){
f=f_; commutative=0;
}
/**
* \brief constructor takes function pointers to compute C=f(A,B);
* \param[in] f_ bivariate function (type_A,type_B)->(type_C)
* \param[in] is_comm whether function is commutative
*/
Bivar_Function(std::function<dtype_C (dtype_A, dtype_B)> f_,
bool is_comm)
: CTF_int::bivar_function(is_comm){
f=f_;
}
/**
* \brief default constructor sets function pointer to NULL
*/
Bivar_Function();
/**
* \brief compute c = f(a,b)
      * \param[in] a pointer to operand that will be cast to dtype_A
      * \param[in] b pointer to operand that will be cast to dtype_B
      * \param[out] c location where the result f(*a,*b) is stored
*/
void apply_f(char const * a, char const * b, char * c) const {
((dtype_C*)c)[0] = f(((dtype_A const*)a)[0],((dtype_B const*)b)[0]);
}
/**
* \brief compute c = c+ f(a,b)
      * \param[in] a pointer to operand that will be cast to dtype_A
      * \param[in] b pointer to operand that will be cast to dtype_B
      * \param[in,out] c accumulator to which f(*a,*b) is added
      * \param[in] sr_C algebraic structure for c, needed to do the add
*/
void acc_f(char const * a, char const * b, char * c, CTF_int::algstrct const * sr_C) const {
dtype_C tmp;
tmp = f(((dtype_A const*)a)[0],((dtype_B const*)b)[0]);
sr_C->add(c, (char const *)&tmp, c);
}
// FIXME: below kernels replicate code from src/interface/semiring.h
void csrmm(int m,
int n,
int k,
dtype_A const * A,
int const * JA,
int const * IA,
int64_t nnz_A,
dtype_B const * B,
dtype_C * C,
CTF_int::algstrct const * sr_C) const {
//TAU_FSTART(3type_csrmm);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int row_A=0; row_A<m; row_A++){
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int col_B=0; col_B<n; col_B++){
for (int i_A=IA[row_A]-1; i_A<IA[row_A+1]-1; i_A++){
int col_A = JA[i_A]-1;
dtype_C tmp = f(A[i_A],B[col_B*k+col_A]);
sr_C->add((char const *)&C[col_B*m+row_A],(char const*)&tmp,(char *)&C[col_B*m+row_A]);
}
}
}
//TAU_FSTOP(3type_csrmm);
}
void csrmultd
(int m,
int n,
int k,
dtype_A const * A,
int const * JA,
int const * IA,
int nnz_A,
dtype_B const * B,
int const * JB,
int const * IB,
int nnz_B,
dtype_C * C,
CTF_int::algstrct const * sr_C) const {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int row_A=0; row_A<m; row_A++){
for (int i_A=IA[row_A]-1; i_A<IA[row_A+1]-1; i_A++){
int row_B = JA[i_A]-1; //=col_A
for (int i_B=IB[row_B]-1; i_B<IB[row_B+1]-1; i_B++){
int col_B = JB[i_B]-1;
dtype_C tmp = f(A[i_A],B[i_B]);
sr_C->add((char const*)&C[col_B*m+row_A],(char const*)&tmp,(char *)&C[col_B*m+row_A]);
}
}
}
}
void csrmultcsr
(int m,
int n,
int k,
dtype_A const * A,
int const * JA,
int const * IA,
int nnz_A,
dtype_B const * B,
int const * JB,
int const * IB,
int nnz_B,
char *& C_CSR,
CTF_int::algstrct const * sr_C) const {
int * IC = (int*)CTF_int::alloc(sizeof(int)*(m+1));
int * has_col = (int*)CTF_int::alloc(sizeof(int)*n);
IC[0] = 1;
for (int i=0; i<m; i++){
memset(has_col, 0, sizeof(int)*n);
IC[i+1] = IC[i];
CTF_int::CSR_Matrix::compute_has_col(JA, IA, JB, IB, i, has_col);
for (int j=0; j<n; j++){
IC[i+1] += has_col[j];
}
}
CTF_int::CSR_Matrix C(IC[m]-1, m, n, sr_C);
dtype_C * vC = (dtype_C*)C.vals();
int * JC = C.JA();
memcpy(C.IA(), IC, sizeof(int)*(m+1));
CTF_int::cdealloc(IC);
IC = C.IA();
int64_t * rev_col = (int64_t*)CTF_int::alloc(sizeof(int64_t)*n);
for (int i=0; i<m; i++){
memset(has_col, 0, sizeof(int)*n);
CTF_int::CSR_Matrix::compute_has_col(JA, IA, JB, IB, i, has_col);
int vs = 0;
for (int j=0; j<n; j++){
if (has_col[j]){
JC[IC[i]+vs-1] = j+1;
rev_col[j] = IC[i]+vs-1;
vs++;
}
}
memset(has_col, 0, sizeof(int)*n);
for (int j=0; j<IA[i+1]-IA[i]; j++){
int row_B = JA[IA[i]+j-1]-1;
int idx_A = IA[i]+j-1;
for (int l=0; l<IB[row_B+1]-IB[row_B]; l++){
int idx_B = IB[row_B]+l-1;
if (has_col[JB[idx_B]-1]){
dtype_C tmp = f(A[idx_A],B[idx_B]);
sr_C->add((char const *)&vC[rev_col[JB[idx_B]-1]], (char const *)&tmp, (char *)&vC[rev_col[JB[idx_B]-1]]);
} else {
vC[rev_col[JB[idx_B]-1]] = f(A[idx_A],B[idx_B]);
}
has_col[JB[idx_B]-1] = 1;
}
}
}
CTF_int::CSR_Matrix C_in(C_CSR);
if (C_CSR == NULL || C_in.nnz() == 0){
C_CSR = C.all_data;
} else {
char * ans = CTF_int::CSR_Matrix::csr_add(C_CSR, C.all_data, sr_C);
CTF_int::cdealloc(C.all_data);
C_CSR = ans;
}
CTF_int::cdealloc(has_col);
CTF_int::cdealloc(rev_col);
}
void ccsrmm(int m,
int n,
int k,
char const * A,
int const * JA,
int const * IA,
int64_t nnz_A,
char const * B,
char * C,
CTF_int::algstrct const * sr_C) const {
csrmm(m,n,k,(dtype_A const *)A,JA,IA,nnz_A,(dtype_B const *)B, (dtype_C *)C, sr_C);
}
void ccsrmultd
(int m,
int n,
int k,
char const * A,
int const * JA,
int const * IA,
int nnz_A,
char const * B,
int const * JB,
int const * IB,
int nnz_B,
char * C,
CTF_int::algstrct const * sr_C) const {
csrmultd(m,n,k,(dtype_A const *)A,JA,IA,nnz_A,(dtype_B const *)B,JB,IB,nnz_B,(dtype_C *)C,sr_C);
}
void ccsrmultcsr
(int m,
int n,
int k,
char const * A,
int const * JA,
int const * IA,
int nnz_A,
char const * B,
int const * JB,
int const * IB,
int nnz_B,
char *& C_CSR,
CTF_int::algstrct const * sr_C) const {
csrmultcsr(m,n,k,(dtype_A const *)A,JA,IA,nnz_A,(dtype_B const *)B, JB, IB, nnz_B, C_CSR, sr_C);
}
};
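  /* Usage sketch (illustrative; a Bivar_Function is typically built via CTF::Function):
   *   CTF::Function<double,double,double> fmul([](double a, double b){ return a*b; });
   *   C["ij"] += fmul(A["ik"], B["kj"]); // custom elementwise multiply in a contraction
   */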
/**
* \brief custom function f : (X * Y * Z) -> Z applied on three tensors as contraction:
* e.g. f(A["ij"],B["ij"],C["ij"])
*/
template<typename dtype_A=double, typename dtype_B=dtype_A, typename dtype_C=dtype_A>
class Bivar_Transform : public CTF_int::bivar_function {
public:
/**
      * \brief function signature for element-wise transform f(a,b,c), which updates c in place
*/
//void (*f)(dtype_A, dtype_B &);
std::function<void(dtype_A, dtype_B, dtype_C &)> f;
/**
      * \brief constructor takes function pointer for the transform f(a,b,c)
      * \param[in] f_ transform function (type_A, type_B, type_C&)
*/
Bivar_Transform(std::function<void(dtype_A, dtype_B, dtype_C &)> f_)
: CTF_int::bivar_function() {
f = f_;
}
/**
* \brief constructor takes function pointers to compute C=f(A,B);
* \param[in] f_ bivariate function (type_A,type_B)->(type_C)
* \param[in] is_comm whether function is commutative
*/
Bivar_Transform(std::function<void(dtype_A, dtype_B, dtype_C &)> f_,
bool is_comm)
: CTF_int::bivar_function(is_comm){
f=f_;
}
/**
      * \brief compute f(a,b,c)
* \param[in] a pointer to first operand
* \param[in] b pointer to second operand
* \param[in,out] c value that is accumulated to
      * \param[in] sr_B algebraic structure for b; ignored here
*/
void acc_f(char const * a, char const * b, char * c, CTF_int::algstrct const * sr_B) const {
f(((dtype_A*)a)[0], ((dtype_B*)b)[0], ((dtype_C*)c)[0]);
}
/**
* \brief apply function f to value stored at a, for an accumulator, this is the same as acc_f below
* \param[in] a pointer to operand that will be cast to dtype
* \param[in] b pointer to second operand that will be cast to dtype
      * \param[in,out] c operand of type dtype_C that f updates in place using a and b
*/
void apply_f(char const * a, char const * b, char * c) const { acc_f(a,b,c,NULL); }
bool is_accumulator() const { return true; }
};
template<typename dtype_A=double, typename dtype_B=dtype_A, typename dtype_C=dtype_A>
class Function {
public:
bool is_univar;
Univar_Function<dtype_A, dtype_B> * univar;
bool is_bivar;
Bivar_Function<dtype_A, dtype_B, dtype_C> * bivar;
Function(std::function<dtype_B(dtype_A)> f_){
is_univar = true;
is_bivar = false;
univar = new Univar_Function<dtype_A, dtype_B>(f_);
}
Function(std::function<dtype_C(dtype_A,dtype_B)> f_, bool is_comm=false){
is_univar = false;
is_bivar = true;
bivar = new Bivar_Function<dtype_A, dtype_B, dtype_C>(f_,is_comm);
}
CTF_int::Unifun_Term operator()(CTF_int::Term const & A) const {
assert(is_univar);
return univar->operator()(A);
}
CTF_int::Bifun_Term operator()(CTF_int::Term const & A, CTF_int::Term const & B) const {
assert(is_bivar);
return bivar->operator()(A,B);
}
operator Univar_Function<dtype_A, dtype_B>() const {
assert(is_univar);
return *univar;
}
operator Bivar_Function<dtype_A, dtype_B, dtype_C>() const {
assert(is_bivar);
return *bivar;
}
~Function(){
if (is_univar) delete(univar);
if (is_bivar) delete(bivar);
}
};
template<typename dtype_A=double, typename dtype_B=dtype_A, typename dtype_C=dtype_A>
class Transform {
public:
bool is_endo;
Endomorphism<dtype_A> * endo;
bool is_univar;
Univar_Transform<dtype_A, dtype_B> * univar;
bool is_bivar;
Bivar_Transform<dtype_A, dtype_B, dtype_C> * bivar;
Transform(std::function<void(dtype_A&)> f_){
is_endo = true;
is_univar = false;
is_bivar = false;
endo = new Endomorphism<dtype_A>(f_);
}
Transform(std::function<void(dtype_A, dtype_B&)> f_){
is_endo = false;
is_univar = true;
is_bivar = false;
univar = new Univar_Transform<dtype_A, dtype_B>(f_);
}
Transform(std::function<void(dtype_A, dtype_B, dtype_C&)> f_){
is_endo = false;
is_univar = false;
is_bivar = true;
bivar = new Bivar_Transform<dtype_A, dtype_B, dtype_C>(f_);
}
~Transform(){
if (is_endo) delete endo;
if (is_univar) delete univar;
if (is_bivar) delete bivar;
}
void operator()(CTF_int::Term const & A) const {
assert(is_endo);
endo->operator()(A);
}
void operator()(CTF_int::Term const & A, CTF_int::Term const & B) const {
assert(is_univar);
univar->operator()(A,B);
}
void operator()(CTF_int::Term const & A, CTF_int::Term const & B, CTF_int::Term const & C) const {
assert(is_bivar);
bivar->operator()(A,B,C);
}
operator Bivar_Transform<dtype_A, dtype_B, dtype_C>(){
assert(is_bivar);
return *bivar;
}
operator Univar_Transform<dtype_A, dtype_B>(){
assert(is_univar);
return *univar;
}
operator Endomorphism<dtype_A>(){
assert(is_endo);
return *endo;
}
bool is_accumulator() const { return true; }
};
/**
* @}
*/
}
#endif
|
lock-unrelated.c | /*
* lock-unrelated.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run-race | FileCheck %s
#include <omp.h>
#include <stdio.h>
int main(int argc, char *argv[]) {
int var = 0;
omp_lock_t lock;
omp_init_lock(&lock);
#pragma omp parallel num_threads(2) shared(var)
{
omp_set_lock(&lock);
// Dummy locking.
omp_unset_lock(&lock);
var++;
}
omp_destroy_lock(&lock);
int error = (var != 2);
fprintf(stderr, "DONE\n");
return error;
}
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK-NEXT: {{(Write|Read)}} of size 4
// CHECK-NEXT: #0 {{.*}}lock-unrelated.c:31
// CHECK: Previous write of size 4
// CHECK-NEXT: #0 {{.*}}lock-unrelated.c:31
// CHECK: DONE
// CHECK: ThreadSanitizer: reported 1 warnings
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
  if (argc <= 4) {
    fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+2;
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 24;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
  // initialize variables
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
    // serial execution - Additions: 6, Multiplications: 7
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
pluto_codegen_if.c | /*
* Pluto: An automatic parallelizer and locality optimizer
*
* Copyright (C) 2007-2012 Uday Bondhugula
*
* This software is available under the MIT license. Please see LICENSE in the
* top-level directory for details.
*
* This file is part of libpluto.
*
*/
#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "pluto_codegen_if.h"
#include "ast_transform.h"
#include "constraints.h"
#include "math_support.h"
#include "pluto/matrix.h"
#include "pluto/pluto.h"
#include "program.h"
#include "version.h"
#include "cloog/cloog.h"
#include "osl/extensions/loop.h"
static int get_first_point_loop(Stmt *stmt, const PlutoProg *prog) {
int i, first_point_loop;
if (stmt->type != ORIG) {
for (i = 0; i < prog->num_hyperplanes; i++) {
if (!pluto_is_hyperplane_scalar(stmt, i)) {
return i;
}
}
/* No non-scalar hyperplanes */
return 0;
}
for (i = stmt->last_tile_dim + 1; i < stmt->trans->nrows; i++) {
if (stmt->hyp_types[i] == H_LOOP)
break;
}
if (i < prog->num_hyperplanes) {
first_point_loop = i;
} else {
/* Should come here only if
* it's a 0-d statement */
first_point_loop = 0;
}
return first_point_loop;
}
/* Generate and print .cloog file from the transformations computed */
void pluto_gen_cloog_file(FILE *fp, const PlutoProg *prog) {
int i;
Stmt **stmts = prog->stmts;
int nstmts = prog->nstmts;
int npar = prog->npar;
PlutoContext *context = prog->context;
IF_DEBUG(printf("[pluto] generating Cloog file...\n"));
fprintf(fp, "# CLooG script generated automatically by PLUTO %s\n",
PLUTO_VERSION);
fprintf(fp, "# language: C\n");
fprintf(fp, "c\n\n");
/* Context: setting conditions on parameters */
PlutoConstraints *param_ctx = pluto_constraints_dup(prog->param_context);
pluto_constraints_intersect_isl(param_ctx, prog->codegen_context);
pluto_constraints_print_polylib(fp, param_ctx);
pluto_constraints_free(param_ctx);
/* Setting parameter names */
fprintf(fp, "\n1\n");
for (i = 0; i < npar; i++) {
fprintf(fp, "%s ", prog->params[i]);
}
fprintf(fp, "\n\n");
fprintf(fp, "# Number of statements\n");
fprintf(fp, "%d\n\n", nstmts);
/* Print statement domains */
for (i = 0; i < nstmts; i++) {
fprintf(fp, "# S%d (%s)\n", stmts[i]->id + 1, stmts[i]->text);
pluto_constraints_print_polylib(fp, stmts[i]->domain);
fprintf(fp, "0 0 0\n\n");
}
fprintf(fp, "# we want cloog to set the iterator names\n");
fprintf(fp, "0\n\n");
fprintf(fp, "# Number of scattering functions\n");
if (nstmts >= 1 && stmts[0]->trans != NULL) {
fprintf(fp, "%d\n\n", nstmts);
/* Print scattering functions */
for (i = 0; i < nstmts; i++) {
fprintf(fp, "# T(S%d)\n", i + 1);
PlutoConstraints *sched = pluto_stmt_get_schedule(stmts[i]);
pluto_constraints_print_polylib(fp, sched);
fprintf(fp, "\n");
pluto_constraints_free(sched);
}
    /* Setting target loop names (all stmts have the same number of hyperplanes) */
fprintf(fp, "# we will set the scattering dimension names\n");
fprintf(fp, "%d\n", stmts[0]->trans->nrows);
for (i = 0; i < stmts[0]->trans->nrows; i++) {
fprintf(fp, "t%d ", i + 1);
}
fprintf(fp, "\n");
} else {
fprintf(fp, "0\n\n");
}
}
static void gen_stmt_macro(const Stmt *stmt, PlutoOptions *options,
FILE *outfp) {
int j;
for (j = 0; j < stmt->dim; j++) {
if (stmt->iterators[j] == NULL) {
printf("Iterator name not set for S%d; required \
for generating declarations\n",
stmt->id + 1);
assert(0);
}
}
fprintf(outfp, "#define S%d", stmt->id + 1);
fprintf(outfp, "(");
for (j = 0; j < stmt->dim; j++) {
if (j != 0)
fprintf(outfp, ",");
fprintf(outfp, "%s", stmt->iterators[j]);
}
fprintf(outfp, ")\t");
/* Generate pragmas for Bee/Cl@k */
if (options->bee) {
fprintf(outfp, " __bee_schedule");
for (j = 0; j < stmt->trans->nrows; j++) {
fprintf(outfp, "[");
pluto_affine_function_print(outfp, stmt->trans->val[j], stmt->dim,
(const char **)stmt->iterators);
fprintf(outfp, "]");
}
fprintf(outfp, " _NL_DELIMIT_ ");
}
fprintf(outfp, "%s\n", stmt->text);
}
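/* For example (illustrative), a statement S1 with text "a[i][j] = 0;" and
 * iterators i, j (and no Bee pragmas) yields:
 *   #define S1(i,j) a[i][j] = 0;
 */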
/* Generate variable declarations and macros */
int generate_declarations(const PlutoProg *prog, FILE *outfp) {
int i;
Stmt **stmts = prog->stmts;
int nstmts = prog->nstmts;
/* Generate statement macros */
for (i = 0; i < nstmts; i++) {
gen_stmt_macro(stmts[i], prog->context->options, outfp);
}
fprintf(outfp, "\n");
/* Scattering iterators. */
if (prog->num_hyperplanes >= 1) {
fprintf(outfp, "\t\tint ");
for (i = 0; i < prog->num_hyperplanes; i++) {
if (i != 0)
fprintf(outfp, ", ");
fprintf(outfp, "t%d", i + 1);
if (prog->hProps[i].unroll) {
fprintf(outfp, ", t%dt, newlb_t%d, newub_t%d", i + 1, i + 1, i + 1);
}
}
fprintf(outfp, ";\n\n");
}
if (prog->context->options->parallel) {
fprintf(outfp, "\tint lb, ub, lbp, ubp, lb2, ub2;\n");
}
/* For vectorizable loop bound replacement */
fprintf(outfp, "\tregister int lbv, ubv;\n\n");
return 0;
}
/* Call cloog and generate code for the transformed program
*
* cloogf, cloogl: set to -1 if you want the function to decide
*
 * --cloogf, --cloogl override everything; otherwise the cloogf/cloogl
 * arguments are used if != -1; failing that, the function decides
*/
int pluto_gen_cloog_code(const PlutoProg *prog, int cloogf, int cloogl,
FILE *cloogfp, FILE *outfp) {
CloogInput *input;
CloogOptions *cloogOptions;
CloogState *state;
PlutoContext *context = prog->context;
PlutoOptions *options = context->options;
int i;
struct clast_stmt *root;
Stmt **stmts = prog->stmts;
int nstmts = prog->nstmts;
state = cloog_state_malloc();
cloogOptions = cloog_options_malloc(state);
cloogOptions->fs = (int *)malloc(nstmts * sizeof(int));
cloogOptions->ls = (int *)malloc(nstmts * sizeof(int));
cloogOptions->fs_ls_size = nstmts;
for (i = 0; i < nstmts; i++) {
cloogOptions->fs[i] = -1;
cloogOptions->ls[i] = -1;
}
cloogOptions->name = (char *)"CLooG file produced by PLUTO";
cloogOptions->compilable = 0;
cloogOptions->esp = 1;
cloogOptions->strides = 1;
cloogOptions->quiet = !options->debug;
/* Generates better code in general */
cloogOptions->backtrack = options->cloogbacktrack;
if (options->cloogf >= 1 && options->cloogl >= 1) {
cloogOptions->f = options->cloogf;
cloogOptions->l = options->cloogl;
} else {
if (cloogf >= 1 && cloogl >= 1) {
cloogOptions->f = cloogf;
cloogOptions->l = cloogl;
} else if (options->tile) {
for (i = 0; i < nstmts; i++) {
cloogOptions->fs[i] = get_first_point_loop(stmts[i], prog) + 1;
cloogOptions->ls[i] = prog->num_hyperplanes;
}
} else {
/* Default */
cloogOptions->f = 1;
/* last level to optimize: number of hyperplanes;
* since Pluto provides full-ranked transformations */
cloogOptions->l = prog->num_hyperplanes;
}
}
if (!options->silent) {
if (nstmts >= 1 && cloogOptions->fs[0] >= 1) {
printf("[pluto] using statement-wise -fs/-ls options: ");
for (i = 0; i < nstmts; i++) {
printf("S%d(%d,%d), ", i + 1, cloogOptions->fs[i], cloogOptions->ls[i]);
}
printf("\n");
} else {
printf("[pluto] using Cloog -f/-l options: %d %d\n", cloogOptions->f,
cloogOptions->l);
}
}
if (options->cloogsh)
cloogOptions->sh = 1;
cloogOptions->name = (char *)"PLUTO-produced CLooG file";
fprintf(outfp, "/* Start of CLooG code */\n");
/* Get the code from CLooG */
IF_DEBUG(printf("[pluto] cloog_input_read\n"));
input = cloog_input_read(cloogfp, cloogOptions);
IF_DEBUG(printf("[pluto] cloog_clast_create\n"));
root = cloog_clast_create_from_input(input, cloogOptions);
if (options->prevector) {
pluto_mark_vector(root, prog, cloogOptions);
}
if (options->parallel) {
pluto_mark_parallel(root, prog, cloogOptions);
}
/* Unroll jamming has to be done at the end. We do not want the epilogue to be
* marked parallel as there will be very few iterations in it. Properties of
* the inner loops that are marked PARALLEL or PARALLEL_VEC will be retained
* during unroll jamming. */
if (options->unrolljam) {
pluto_mark_unroll_jam(root, prog, cloogOptions, options->ufactor);
clast_unroll_jam(root);
}
clast_pprint(outfp, root, 0, cloogOptions);
cloog_clast_free(root);
fprintf(outfp, "/* End of CLooG code */\n");
cloog_options_free(cloogOptions);
cloog_state_free(state);
return 0;
}
/// Generate code for a single multicore. Clast will insert OpenMP
/// pragmas later.
int pluto_multicore_codegen(FILE *cloogfp, FILE *outfp, const PlutoProg *prog) {
if (prog->context->options->parallel) {
fprintf(outfp, "#include <omp.h>\n\n");
}
generate_declarations(prog, outfp);
if (prog->context->options->multipar) {
fprintf(outfp, "\tomp_set_nested(1);\n");
fprintf(outfp, "\tomp_set_num_threads(2);\n");
}
pluto_gen_cloog_code(prog, -1, -1, cloogfp, outfp);
return 0;
}
/* Decides which loops to mark parallel and generates the corresponding OpenMP
* pragmas and writes them out to a file. They are later read by a script
* (ploog) and appropriately inserted into the output Cloog code
*
* Returns: the number of parallel loops for which OpenMP pragmas were generated
*
* Generate the #pragma comment -- will be used by a syntactic scanner
* to put in place -- should implement this with CLast in future */
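/* An emitted .pragmas line looks like (illustrative, hyperplane t2 of a 4-d
 * schedule with --prevector):
 *   t2 #pragma omp parallel for shared(t1,lb1,ub1) private(ubv,lbv,t2,t3,t4)
 */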
int pluto_omp_parallelize(PlutoProg *prog) {
int i;
FILE *outfp = fopen(".pragmas", "w");
if (!outfp)
return 1;
HyperplaneProperties *hProps = prog->hProps;
PlutoContext *context = prog->context;
PlutoOptions *options = context->options;
int loop;
/* IMPORTANT: Note that by the time this function is called, pipelined
* parallelism has already been converted to inner parallelism in
* tile space (due to a tile schedule) - so we don't need check any
* PIPE_PARALLEL properties
*/
  /* Detect the outermost sync-free parallel loop - find up to two of them if
* the multipar option is set */
int num_parallel_loops = 0;
for (loop = 0; loop < prog->num_hyperplanes; loop++) {
if (hProps[loop].dep_prop == PARALLEL && hProps[loop].type != H_SCALAR) {
// Remember our loops are 1-indexed (t1, t2, ...)
fprintf(outfp, "t%d #pragma omp parallel for shared(", loop + 1);
for (i = 0; i < loop; i++) {
fprintf(outfp, "t%d,", i + 1);
}
for (i = 0; i < num_parallel_loops + 1; i++) {
if (i != 0)
fprintf(outfp, ",");
fprintf(outfp, "lb%d,ub%d", i + 1, i + 1);
}
fprintf(outfp, ") private(");
if (options->prevector) {
fprintf(outfp, "ubv,lbv,");
}
/* Lower and upper scalars for parallel loops yet to be marked */
/* NOTE: we extract up to 2 degrees of parallelism
*/
if (options->multipar) {
for (i = num_parallel_loops + 1; i < 2; i++) {
fprintf(outfp, "lb%d,ub%d,", i + 1, i + 1);
}
}
for (i = loop; i < prog->num_hyperplanes; i++) {
if (i != loop)
fprintf(outfp, ",");
fprintf(outfp, "t%d", i + 1);
}
fprintf(outfp, ")\n");
num_parallel_loops++;
if (!options->multipar || num_parallel_loops == 2) {
break;
}
}
}
IF_DEBUG(fprintf(stdout, "[pluto] marked %d loop(s) parallel\n",
num_parallel_loops));
fclose(outfp);
return num_parallel_loops;
}
/*
 * Get a list of to-be-parallelized loops from PlutoProg.
*/
osl_loop_p pluto_get_parallel_loop_list(const PlutoProg *prog,
int vloopsfound) {
unsigned i, j, nploops;
osl_loop_p ret_loop = NULL;
PlutoContext *context = prog->context;
Ploop **ploops = pluto_get_dom_parallel_loops(prog, &nploops);
IF_DEBUG(printf("[pluto_parallel_loop_list] parallelizable loops\n"););
IF_DEBUG(pluto_loops_print(ploops, nploops););
for (i = 0; i < nploops; i++) {
osl_loop_p newloop = osl_loop_malloc();
char iter[13];
snprintf(iter, sizeof(iter), "t%d", ploops[i]->depth + 1);
newloop->iter = strdup(iter);
newloop->nb_stmts = ploops[i]->nstmts;
newloop->stmt_ids = (int *)malloc(ploops[i]->nstmts * sizeof(int));
unsigned max_depth = 0;
for (j = 0; j < ploops[i]->nstmts; j++) {
Stmt *stmt = ploops[i]->stmts[j];
newloop->stmt_ids[j] = stmt->id + 1;
if (stmt->trans->nrows > max_depth)
max_depth = stmt->trans->nrows;
}
newloop->directive += CLAST_PARALLEL_OMP;
char *private_vars = (char *)malloc(128);
private_vars[0] = '\0';
if (vloopsfound)
      strcpy(private_vars, "lbv, ubv,");
unsigned depth = ploops[i]->depth + 1;
for (depth++; depth <= max_depth; depth++) {
sprintf(private_vars + strlen(private_vars), "t%d,", depth);
}
if (strlen(private_vars))
private_vars[strlen(private_vars) - 1] = '\0'; // remove last comma
newloop->private_vars = strdup(private_vars);
free(private_vars);
// add new loop to looplist
osl_loop_add(newloop, &ret_loop);
}
pluto_loops_free(ploops, nploops);
return ret_loop;
}
/// Get a list of to-be-vectorized loops from PlutoProg.
osl_loop_p pluto_get_vector_loop_list(const PlutoProg *prog) {
unsigned i, j, nploops;
osl_loop_p ret_loop = NULL;
PlutoContext *context = prog->context;
Ploop **ploops = pluto_get_parallel_loops(prog, &nploops);
for (i = 0; i < nploops; i++) {
/* Only the innermost ones */
if (!pluto_loop_is_innermost(ploops[i], prog))
continue;
IF_DEBUG(printf("[pluto_get_vector_loop_list] marking loop\n"););
IF_DEBUG(pluto_loop_print(ploops[i]););
osl_loop_p newloop = osl_loop_malloc();
char iter[13];
snprintf(iter, sizeof(iter), "t%d", ploops[i]->depth + 1);
newloop->iter = strdup(iter);
newloop->nb_stmts = ploops[i]->nstmts;
newloop->stmt_ids = (int *)malloc(ploops[i]->nstmts * sizeof(int));
for (j = 0; j < ploops[i]->nstmts; j++) {
newloop->stmt_ids[j] = ploops[i]->stmts[j]->id + 1;
}
newloop->directive += CLAST_PARALLEL_VEC;
// add new loop to looplist
osl_loop_add(newloop, &ret_loop);
}
pluto_loops_free(ploops, nploops);
return ret_loop;
}
|
mergesam.c | #define _MODULE_GMAPPER
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>
#include <getopt.h>
#include <assert.h>
#include <ctype.h>
#include <omp.h>
#include "mergesam.h"
#include "sam2pretty_lib.h"
#include "file_buffer.h"
#include "fastx_readnames.h"
#include "sam_reader.h"
#include "../common/util.h"
#include "../gmapper/gmapper-defaults.h"
#include "../gmapper/gmapper.h"
#include "mergesam_heap.h"
runtime_options options;
fastx_readnames fxrn;
//Pretty linked list
pp_ll * sam_headers;
pp_ll ** pp_ll_index;
pp_ll * master_ll;
//SAM file ios
heap_pa * thread_heaps;
typedef struct sam_reader sam_reader;
sam_reader ** sam_files;
//char * sam_header_filename=NULL;
//sam_header_filename=NULL;
int64_t genome_length=0;
char * command_line=NULL;
//char * reads_filename;
//reads_filename=NULL;
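/* Sums the LN: fields of @SQ header lines; e.g. (illustrative, fields are
 * tab-separated in real SAM) "@SQ SN:chr1 LN:1000" and "@SQ SN:chr2 LN:500"
 * yield a genome length of 1500. */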
int64_t genome_length_from_headers(char ** sam_lines, int header_entries) {
int64_t g_length=0;
int i;
for (i=0; i<header_entries; i++) {
char * line = sam_lines[i];
if (line[0]=='@' && line[1]=='S' && line[2]=='Q') {
int x;
for (x=4; line[x]!='\0'; x++) {
if (line[x]=='\t' && line[x+1]=='L' && line[x+2]=='N') {
x++;
break;
}
}
if (line[x]=='\0' || line[x]!='L') {
fprintf(stderr,"Invalid sam header format\n");
exit(1);
}
assert(line[x]=='L');
x++;
assert(line[x]!='\0'); //N
x++;
assert(line[x]!='\0'); //:
x++;
assert(line[x]!='\0');
int y=x;
while (line[y]!='\t' && line[y]!='\0' && line[y]!='\n') {
y++;
}
char old_char = line[y];
line[y]='\0';
g_length+=atoi(line+x);
line[y]=old_char;
}
}
return g_length;
}
void process_sam_headers() {
if (found_sam_headers) {
int header_entries=0;
int i;
for (i=0; i<options.number_of_sam_files; i++) {
header_entries+=sam_files[i]->sam_headers->length;
}
//get pointers to each header line
char ** sam_lines=(char**)malloc(sizeof(char*)*(header_entries+1)); //add in the command line
if (sam_lines==NULL) {
fprintf(stderr,"Failed to allocate memory for sam_header entries!\n");
exit(1);
}
int index=0;
int pg_id=0;
for (i=0; i<options.number_of_sam_files; i++) {
pretty * pa = sam_files[i]->sam_headers->head;
while(pa!=NULL) {
char * s = pa->sam_string;
if (strncmp(s,"@PG ID:",strlen("@PG ID:"))==0) {
assert(pg_id<100000000);
char * x = (char*)malloc(sizeof(char)*(strlen(s)+13));
sprintf(x,"%s%d-%s","@PG ID:",pg_id++,s+strlen("@PG ID:"));
pa->sam_string=x;
}
sam_lines[index++]=pa->sam_string;
pa=pa->next;
}
}
assert(command_line!=NULL);
genome_length=genome_length_from_headers(sam_lines, header_entries);
fprintf(stderr,"Calculated genome length to be , %ld\n",genome_length);
//sam_lines[index++]=command_line;
//want to sort the headers here
qsort(sam_lines, header_entries, sizeof(char*),sam_header_sort);
//want to print the headers here
assert(index>0);
fprintf(stdout,"%s\n",sam_lines[0]);
bool printed_pg_self=false;
if (sam_header_filename==NULL) {
for (i=1; i<index; i++) {
int ret=sam_lines[i-1]!=NULL ? strcmp(sam_lines[i],sam_lines[i-1]) : 1;
if (!printed_pg_self && strncmp(sam_lines[i],"@PG",strlen("@PG"))==0) {
fprintf(stdout,"%s\n",command_line);
printed_pg_self=true;
}
if (ret!=0) {
fprintf(stdout,"%s\n",sam_lines[i]);
}
if (strncmp(sam_lines[i],"@PG ID:",strlen("@PG ID:"))==0) {
free(sam_lines[i]);
sam_lines[i]=NULL;
}
}
}
if (!printed_pg_self) {
fprintf(stdout,"%s\n",command_line);
printed_pg_self=true;
}
//get the genome length!!!
free(sam_lines);
memset(sam_headers,0,sizeof(pp_ll)*options.number_of_sam_files);
found_sam_headers=false;
}
}
void usage(char * s) {
fprintf(stderr,
"usage: %s [options/parameters] <r> <s1> <s2> ...\n", s);
fprintf(stderr,
" <r> Reads filename, if paired then one of the two paired files\n");
fprintf(stderr,
" <s?> A SAM file input for mergigng\n");
fprintf(stderr,
"Runtime: (all sizes are in bytes unless specified)\n");
fprintf(stderr,
" --buffer-size File buffer size in memory per file (Default: %d)\n",DEF_BUFFER_SIZE);
fprintf(stderr,
" --read-size Read size, read into buffer with this (Default: %d)\n",DEF_READ_SIZE);
fprintf(stderr,
" -s/--stack-size Input alignment stack size (Default: %d)\n",DEF_ALIGNMENTS_STACK_SIZE);
fprintf(stderr,
" --read-rate How many reads to process at once (Default: %d)\n",DEF_READ_RATE);
fprintf(stderr,
"Output options:\n");
fprintf(stderr,
" --un Output unaligned FAST(A/Q) file (Default: disabled)\n");
fprintf(stderr,
" --al Output aligned FAST(A/Q) file (Default: disabled)\n");
fprintf(stderr,
" --sam-unaligned Unaligned reads in SAM output (Default: disabled)\n");
fprintf(stderr,
" -o/--report The maximum alignments to report (Default: %d)\n",DEF_MAX_OUTPUTS);
fprintf(stderr,
" -N/--threads The number of threads to use (Default: 1)\n");
fprintf(stderr,
" -E/--sam Output in SAM format (Default: disabled)\n");
fprintf(stderr,
" -Q/--fastq Reads are in fastq format (Default: auto-detect)\n");
fprintf(stderr,
" --strata Print only the best scoring hits\n");
fprintf(stderr,
" --max-alignments Max. align. per read (0=all) (Default: %d)\n",DEF_MAX_ALIGNMENTS);
fprintf(stderr,
" --no-half-paired Do not output half-paired alignments (Default: disabled)\n");
fprintf(stderr,
" --insert-size-dist Use mean,stdev for insert size dist. (Default: %d,%d)\n",DEF_INSERT_SIZE_MEAN,DEF_INSERT_SIZE_STDDEV);
fprintf(stderr,
" --single-best-mapping See documentation, output onlt *best (Default: disabled)\n");
fprintf(stderr,
" --min-mapq Minimum mapping quality (Default: 0)\n");
fprintf(stderr,
" --all-contigs Output given no more merging after (Default: disabled)\n");
fprintf(stderr,
" --half-paired Output half-paired mappings (Default: enabled)\n");
fprintf(stderr,
" --no-mapping-qualities Do not compute mapping qualities (Default: disabled)\n");
fprintf(stderr,
" --leave-mapq Leave the mapq field as it was (Default: disabled)\n");
fprintf(stderr,
" --sam-header Use file as SAM header\n");
fprintf(stderr,
" --no-improper-mappings Do not pair up half-paired mappings (Default: disabled)\n");
fprintf(stderr,
" --no-autodetect-input Do not try to auto-detect FAST(A/Q) (Default: disabled)\n");
fprintf(stderr,
" --help This usage screen\n");
exit(1);
}
struct option long_op[] =
{
{"un",1,0,10},
{"al",1,0,11},
{"sam-unaligned",0,0,12},
{"report",1,0,'o'},
{"threads",1,0,'N'},
{"sam",0,0,'E'},
{"fastq", 0, 0, 'Q'},
{"strata",0,0,9},
{"max-alignments",1,0,14},
{"no-half-paired",0,0,19},
{"insert-size-dist",1,0,25},
{"single-best-mapping",0,0,37},
{"all-contigs",0,0,38},
{"half-paired",0,0,41},
{"no-mapping-qualities",0,0,39},
{"sam-header",1,0,2},
{"no-improper-mappings",0,0,42},
{"no-autodetect-input",0,0,48},
{"leave-mapq-untouched",0,0,3},
{"help", 0, 0, 5},
{"buffer-size", 1, 0, 6},
{"read-size", 1, 0, 7},
{"read-rate",1,0,8},
{"stack-size",1,0,'s'},
{"min-mapq",1,0,4},
{0,0,0,0}
};
static inline void fill_fb(file_buffer * fb) {
while (!fb->exhausted) {
fill_read_buffer(&fb->frb);
add_read_buffer_to_main(fb);
if (!fb->exhausted && !fb->changed && fb->frb.eof==0) {
fprintf(stderr,"too small buffer!\n");
exit(1);
}
}
//fprintf(stdout,"Filled %lu to %lu of %lu |%s|\n",fb->unseen_start, fb->unseen_end, fb->size,fb->base);
}
static void print_buffer(file_buffer * fb) {
size_t start = fb->unseen_start;
size_t end = fb->unseen_end;
size_t index;
fprintf(stderr,"|FB|");
fprintf(stderr,"%lu %lu\n",fb->unseen_start, fb->unseen_end);
for (index=start; index<end; index++) {
fprintf(stderr,"%c",fb->base[index%fb->size]);
}
fprintf(stderr,"|END FB|\n");
}
static void print_frb_buffer(file_buffer * fb) {
size_t start=fb->frb.seen;
size_t index;
fprintf(stderr,"|FRB|\n");
for (index=start; index<fb->frb.size; index++) {
fprintf(stderr,"%c",fb->frb.base[index]);
}
fprintf(stderr,"|END FRB|\n");
}
static size_t inline string_to_byte_size(char * s) {
char * x=s;
while (isdigit(x[0])) {x++;};
size_t multiplier=1;
if (*x=='K') {
multiplier=1024;
} else if (*x=='M') {
multiplier=1024*1024;
} else if (*x=='G') {
multiplier=1024*1024*1024;
}
char old_x=*x;
*x='\0';
int ret=atoi(s);
*x=old_x;
if (ret<=0) {
return 0;
}
return ret*multiplier;
}
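/* e.g. string_to_byte_size("512") == 512, string_to_byte_size("64K") == 64*1024,
 * string_to_byte_size("16M") == 16*1024*1024; returns 0 for non-positive input */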
int main (int argc, char ** argv) {
int i;
char pg_line_prefix[]="@PG ID:mergesam VN:2.2.0 CL:";
size_t command_line_length=strlen(pg_line_prefix);
for (i=0; i<argc; i++) {
command_line_length+=strlen(argv[i])+2;
}
command_line=(char*)malloc(sizeof(char)*command_line_length);
if (command_line==NULL) {
fprintf(stderr,"Failed to allocate memory to store command line arguments\n");
exit(1);
}
command_line[0]='\0';
strcpy(command_line,pg_line_prefix);
for (i=0; i<argc; i++) {
strcat(command_line,argv[i]);
strcat(command_line," ");
}
options.unaligned_reads_file=NULL;
options.aligned_reads_file=NULL;
options.sam_unaligned=sam_unaligned;
options.max_outputs=DEF_MAX_OUTPUTS;
options.threads=DEF_NUM_THREADS;
options.sam_format=Eflag;
options.fastq=Qflag;
options.fastq_set=autodetect_input ? false : true; //has the fastq flag been set, i.e. has it been set to fastq=false or fastq=true
options.strata=strata_flag;
options.max_alignments=DEF_MAX_ALIGNMENTS;
options.half_paired=half_paired; //for no half paired default setting
options.single_best=single_best_mapping;
options.insert_size_mean=DEF_INSERT_SIZE_MEAN;
options.insert_size_stddev=DEF_INSERT_SIZE_STDDEV;
options.all_contigs=all_contigs;
/* see above */ //options.half_paired=half_paired; //for no half paired default setting
options.no_mapping_qualities=compute_mapping_qualities ? false : true;
sam_header_filename=NULL;
options.no_improper_mappings=improper_mappings ? false : true;
options.no_autodetect_input=autodetect_input ? false : true;
options.leave_mapq=false;
options.paired=false;
options.unpaired=false;
options.colour_space=false;
options.letter_space=false;
options.mode_set=false;
options.buffer_size=DEF_BUFFER_SIZE;
options.read_size=DEF_READ_SIZE;
options.read_rate=DEF_READ_RATE;
options.alignments_stack_size=DEF_ALIGNMENTS_STACK_SIZE;
options.min_mapq=0;
found_sam_headers=false;
int op_id;
char short_op[] = "o:QN:Es:au";
char c = getopt_long(argc, argv, short_op, long_op, &op_id);
while (c != EOF) {
switch (c) {
//fastq
case 'Q':
options.fastq=true;
break;
//no-auto-detect
case 48:
options.no_autodetect_input=true;
break;
//single-best
case 37:
options.single_best=true;
break;
//buffer-size
case 6:
options.buffer_size=string_to_byte_size(optarg);
break;
//read-size
case 7:
options.read_size=string_to_byte_size(optarg);
break;
//read-rate
case 8:
options.read_rate=atol(optarg);
break;
//threads
case 'N':
options.threads=atoi(optarg);
break;
//half-paired
case 41:
options.half_paired=true;
break;
//no-half-paired
case 19:
options.half_paired=false;
break;
//sam-unaligned
case 12:
options.sam_unaligned=true;
break;
//strata
case 9:
options.strata=true;
break;
//report
case 'o':
options.max_outputs=atoi(optarg);
if (options.max_outputs<=0) {
fprintf(stderr,"Please specify a max_output that is positive!\n");
usage(argv[0]);
}
break;
//max-alignments
case 14:
options.max_alignments=atoi(optarg);
if (options.max_alignments<=0) {
fprintf(stderr,"Please specify a max_alignments that is positive!\n");
usage(argv[0]);
}
break;
//help
case 5:
usage(argv[0]);
break;
//sam-header
case 2:
{
sam_header_filename=optarg;
FILE * sam_header_file = fopen(sam_header_filename,"r");
if (sam_header_file==NULL) {
perror("Failed to open sam header file ");
usage(argv[0]);
}
size_t buffer_size=2046;
char buffer[buffer_size];
size_t read; bool ends_in_newline=true;
while ((read=fread(buffer,1,buffer_size-1,sam_header_file))) {
buffer[read]='\0';
fprintf(stdout,"%s",buffer);
if (buffer[read-1]=='\n') {
ends_in_newline=true;
} else {
ends_in_newline=false;
}
}
if (!ends_in_newline) {
fprintf(stdout,"\n");
				}
				fclose(sam_header_file);
			}
			break;
//sam format
case 'E':
options.sam_format=true;
break;
//stack-size
case 's':
options.alignments_stack_size=atoi(optarg);
assert(options.alignments_stack_size>0);
break;
//al
case 11:
options.aligned_reads_file=fopen(optarg,"w");
if (options.aligned_reads_file==NULL) {
fprintf(stderr,"Failed to open file for writting %s\n",optarg);
exit(1);
}
break;
//un
case 10:
options.unaligned_reads_file=fopen(optarg,"w");
if (options.unaligned_reads_file==NULL) {
fprintf(stderr,"Failed to open file for writting %s\n",optarg);
exit(1);
}
break;
//insert-size-dist
case 25:
{
char * c = strtok(optarg, ",");
if (c == NULL) {
fprintf(stderr, "argmuent for insert-size-dist should be \"mean,stddev\" [%s]", optarg);
exit(1);
}
options.insert_size_mean = atof(c);
c = strtok(NULL, ",");
if (c == NULL) {
fprintf(stderr, "argmuent for insert-size-dist should be \"mean,stddev\" [%s]", optarg);
exit(1);
}
options.insert_size_stddev = atof(c);
}
break;
//all-contigs
case 38:
options.all_contigs=true;
break;
//no-mapping-qualities
case 39:
options.no_mapping_qualities=true;
break;
//no-improper-mappings
case 42:
options.no_improper_mappings=true;
break;
//leave-mapq
case 3:
options.leave_mapq=true;
break;
//min mapq
case 4:
options.min_mapq=atoi(optarg);
break;
default:
fprintf(stderr,"%d : %c , %d is not an option!\n",c,(char)c,op_id);
usage(argv[0]);
break;
}
c = getopt_long(argc, argv, short_op, long_op, &op_id);
}
/* SANITY CHECK ARGUMENTS */
/* {"un",1,0,10},
{"al",1,0,11},
{"sam-unaligned",0,0,12},
{"report",1,0,'o'},
{"threads",1,0,'N'},
{"sam",0,0,'E'},
{"fastq", 0, 0, 'Q'},
{"strata",0,0,9},
{"max-alignments",1,0,14},
{"no-half-paired",0,0,19},
{"insert-size-dist",1,0,25},
{"single-best-mapping",0,0,37},
{"all-contigs",0,0,38},
{"half-paired",0,0,41},
{"no-mapping-qualities",0,0,39},
{"sam-header",1,0,2},
{"no-improper-mappings",0,0,42},
{"no-autodetect-input",0,0,48}, */
if (options.unaligned_reads_file!=NULL && options.aligned_reads_file!=NULL) {
fprintf(stderr," ! Please, '--un' xor '--al' == 1!\n");
exit(1);
}
if (!options.sam_format && options.unaligned_reads_file==NULL && options.aligned_reads_file==NULL) {
fprintf(stderr," ! Mergesam currently only supports output in SAM or FAST(A/Q) format, please use one of '--un','--al',or '--sam'\n");
exit(1);
}
if (options.single_best && options.no_mapping_qualities) {
fprintf(stderr," ! '--single-best' cannot be used in combination with '--no-mapping-qualities'\n");
exit(1);
}
if (options.single_best) {
options.max_outputs=1;
fprintf(stderr," + Setting max outputs per class to 1, because of single_best.\n");
}
/* END OF SANITY CHECKING ... EVERYTHING IS SANE .. MAYBE ... */
omp_set_num_threads(options.threads);
fprintf(stderr," + Running with %d threads!\n",options.threads);
if (argc<=optind+1) {
fprintf(stderr," ! Please specify reads file and at least one sam file!\n");
usage(argv[0]);
}
memset(&fxrn,0,sizeof(fastx_readnames));
fxrn.reads_inmem=20*options.read_rate;
fxrn.read_names=(char*)malloc(sizeof(char)*fxrn.reads_inmem*SIZE_READ_NAME);
if (fxrn.read_names==NULL) {
fprintf(stderr," ! Failed to allocate memory for read_names\n");
exit(1);
}
//memset(fxrn.read_names,'Z',sizeof(char)*fxrn.reads_inmem*SIZE_READ_NAME);
argc-=optind;
argv+=optind;
//Variables for IO of read names
reads_filename=argv[0];
fprintf(stderr," + Using %s as reads filename\n",reads_filename);
argc--;
argv++;
options.number_of_sam_files=argc;
//Open each sam input file
sam_files=(sam_reader**)malloc(sizeof(sam_reader*)*options.number_of_sam_files);
if (sam_files==NULL) {
fprintf(stderr," ! Failed to allocate memory for sam_files!\n");
exit(1);
}
master_ll = (pp_ll*)malloc(sizeof(pp_ll)*options.read_rate);
if (master_ll==NULL) {
fprintf(stderr," ! Failed to allocate memory for master_ll\n");
exit(1);
}
memset(master_ll,0,sizeof(pp_ll)*options.read_rate);
//allocate memory for sam_headers
sam_headers=(pp_ll*)malloc(sizeof(pp_ll)*options.number_of_sam_files);
if (sam_headers==NULL) {
fprintf(stderr," ! Failed to allocate memory for sam_headers\n");
exit(1);
}
//index first the read then the file number
pp_ll_index = (pp_ll**)malloc(sizeof(pp_ll*)*options.read_rate*options.number_of_sam_files);
if (pp_ll_index==NULL) {
fprintf(stderr," ! Failed to allocate memory for pp_ll_index!\n");
exit(1);
}
for (i=0; i<options.number_of_sam_files; i++) {
sam_files[i]=sam_open(argv[i],&fxrn);
sam_files[i]->sam_headers=sam_headers+i;
sam_files[i]->fileno=i;
int j;
for (j=0; j<options.read_rate; j++) {
pp_ll_index[j*options.number_of_sam_files+i]=sam_files[i]->pp_lls+j*LL_ALL;
}
}
//calculate alignments cutoff
int32_t alignments_cutoff=options.max_alignments==0 ? options.max_outputs : MIN(options.max_alignments,options.max_outputs);
//max the heaps for each thread
thread_heaps=(heap_pa* )malloc(sizeof(heap_pa)*options.threads);
if (thread_heaps==NULL) {
fprintf(stderr," ! Failed to allocate memory for thread_heaps!\n");
exit(1);
}
for (i=0; i<options.threads; i++ ) {
heap_pa_init(thread_heaps+i,alignments_cutoff+(options.single_best ? 0 : 1));
fprintf(stderr," + Initializing thread_heap for thread %d at address %p\n",i,thread_heaps+i);
}
//initialize the thread buffers for each thread
output_buffer obs[options.threads];
for (i=0; i<options.threads; i++) {
obs[i].size=((size_t)(options.buffer_size*GROWTH_FACTOR))+1;
obs[i].base=(char*)malloc(sizeof(char)*obs[i].size);
if (obs[i].base==NULL) {
fprintf(stderr," ! Failed to allocate memory for the output buffers!\n");
exit(1);
}
obs[i].used=0;
}
size_t reads_processed=0;
clock_t start_time=clock();
long iterations=0;
//get the hit list, process it, do it again!
fprintf(stderr," + Setting up buffer with size %lu and read_size %lu\n",options.buffer_size,options.read_size);
bool have_non_eof_file=true;
fxrn.fb=fb_open(reads_filename,options.buffer_size,options.read_size);
if (!options.no_autodetect_input && !options.fastq_set) {
options.fastq=auto_detect_fastq(fxrn.fb->frb.file,reads_filename);
}
assert(options.unaligned_reads_file==NULL || options.aligned_reads_file==NULL);
FILE * output_file=(options.unaligned_reads_file!=NULL ? options.unaligned_reads_file : (options.aligned_reads_file!=NULL ? options.aligned_reads_file : stdout));
while (!fxrn.reads_exhausted && have_non_eof_file) {
//fprintf(stderr,"Reads seen %lu, reads unseen %lu, reads filled %lu\n",fxrn.reads_seen,fxrn.reads_unseen,fxrn.reads_filled);
//Populate the hitlist to as large as possible
while (!fxrn.reads_exhausted) {
fill_fb(fxrn.fb);
parse_reads(&fxrn);
}
//fprintf(stderr,"YReads seen %lu, reads unseen %lu, reads filled %lu\n",fxrn.reads_seen,fxrn.reads_unseen,fxrn.reads_filled);
//clear the sam_headers memory
memset(sam_headers,0,sizeof(pp_ll)*options.number_of_sam_files);
//Hit list in memory start processing!
int i;
clock_t last_time = clock ();
while (fxrn.reads_seen<fxrn.reads_filled) {
iterations++;
int reads_to_process=options.read_rate;
//READ IN DATA
#pragma omp parallel for schedule(guided)
for (i=0; i<options.number_of_sam_files; i++) {
obs[omp_get_thread_num()].used=0;
if (sam_files[i]->fb->frb.eof!=1 || sam_files[i]->fb->unseen_end!=sam_files[i]->fb->unseen_inter) {
fill_fb(sam_files[i]->fb);
parse_sam(sam_files[i],&fxrn);
}
}
process_sam_headers();
//find the minimum number of complete read alignments in memory
have_non_eof_file=false;
for (i=0; i<options.number_of_sam_files; i++) {
if (sam_files[i]->fb->frb.eof!=1 || sam_files[i]->fb->unseen_end!=sam_files[i]->fb->unseen_inter) {
//fprintf(stderr,"MIN(%d,%d)\n",reads_to_process,sam_files[i]->last_tested-fxrn.reads_seen);
reads_to_process=MIN(reads_to_process,sam_files[i]->last_tested-fxrn.reads_seen);
have_non_eof_file=true;
}
}
if (!have_non_eof_file) {
reads_to_process=fxrn.reads_filled-fxrn.reads_seen;
for (i=0; i<options.number_of_sam_files; i++) {
if (sam_files[i]->last_tested>fxrn.reads_seen) {
reads_to_process=MAX(reads_to_process,sam_files[i]->last_tested-fxrn.reads_seen);
}
}
if (reads_to_process==0) {
break;
}
reads_to_process=MIN(reads_to_process,options.read_rate);
}
assert(reads_to_process<=options.read_rate);
//fprintf(stderr,"Processing %d reads entries on this iteration..\n",reads_to_process);
if (reads_to_process>0) {
for (i=0; i<options.number_of_sam_files; i++) {
const size_t read_id=(fxrn.reads_seen+reads_to_process-1)%options.read_rate;
//fprintf(stderr,"read rate %lu, using read_id %lu %lu\n",options.read_rate,read_id,sam_files[i]->inter_offsets[read_id]);
sam_files[i]->fb->unseen_start=sam_files[i]->inter_offsets[read_id];
sam_files[i]->pretty_stack_start=sam_files[i]->pretty_stack_ends[read_id];
}
} else {
fprintf(stderr,"AN ERROR HAS OCCURED! - try increasing buffer size?\n");
exit(1);
}
if (options.paired && options.unpaired) {
fprintf(stderr,"FAIL! can't have both paired and unpaired data in input file!\n");
exit(1);
}
//fprintf(stderr,"reads to process %lu\n",reads_to_process);
//prepare output
#pragma omp parallel for schedule(guided) //schedule(static,10)
for (i=0; i<reads_to_process; i++) {
const size_t read_id=(fxrn.reads_seen+i)%options.read_rate;
int thread_num = omp_get_thread_num();
pp_ll_combine_and_check(master_ll+i,pp_ll_index+read_id*options.number_of_sam_files,thread_heaps+thread_num,obs+thread_num);
int j;
for (j=0; j<options.number_of_sam_files; j++) {
memset(pp_ll_index[read_id*options.number_of_sam_files+j],0,sizeof(pp_ll)*LL_ALL);
}
}
//output
for (i=0; i<reads_to_process; i++) {
pretty * pa=master_ll[i].head;
while (pa!=NULL) {
if (pa->paired_sequencing) {
if (pa->first_in_pair) {
fprintf(output_file,"%s\n",pa->sam_string);
if (pa->mate_pair!=NULL) {
fprintf(output_file,"%s\n",pa->mate_pair->sam_string);
}
} else {
if (pa->mate_pair!=NULL) {
fprintf(output_file,"%s\n",pa->mate_pair->sam_string);
}
fprintf(output_file,"%s\n",pa->sam_string);
}
} else {
fprintf(output_file,"%s\n",pa->sam_string);
}
pa=pa->next;
}
}
memset(master_ll,0,sizeof(pp_ll)*reads_to_process);
//update counters
if (reads_to_process>0) {
fxrn.reads_exhausted=false;
fxrn.reads_seen+=reads_to_process;
fxrn.reads_unseen-=reads_to_process;
}
reads_processed+=reads_to_process;
//fprintf(stderr,"XReads seen %lu, reads unseen %lu, reads filled %lu\n",fxrn.reads_seen,fxrn.reads_unseen,fxrn.reads_filled);
if ( (clock()-last_time)/options.threads > CLOCKS_PER_SEC/4) {
double reads_per_second=reads_processed/( (double)(clock()-start_time)/(CLOCKS_PER_SEC*options.threads));
double reads_per_iteration=reads_processed/(double)iterations;
fprintf(stderr,"Processing overall at %lf reads / second, %lf reads / iteration, processed %lu\n",reads_per_second,reads_per_iteration,reads_processed);
last_time=clock();
}
}
}
fprintf(stderr,"Processed %lu reads\n",reads_processed);
free(master_ll);
free(sam_headers);
free(pp_ll_index);
for (i=0; i<options.number_of_sam_files; i++) {
sam_close(sam_files[i]);
}
free(sam_files);
for (i=0; i<options.threads; i++) {
heap_pa_destroy(thread_heaps+i);
free(obs[i].base);
}
free(thread_heaps);
fb_close(fxrn.fb);
free(fxrn.read_names);
return 0;
}
|
GB_binop__gt_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__gt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__gt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_fp32)
// A*D function (colscale): GB (_AxD__gt_fp32)
// D*A function (rowscale): GB (_DxB__gt_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_fp32)
// C=scalar+B GB (_bind1st__gt_fp32)
// C=scalar+B' GB (_bind1st_tran__gt_fp32)
// C=A+scalar GB (_bind2nd__gt_fp32)
// C=A'+scalar GB (_bind2nd_tran__gt_fp32)
// C type: bool
// A type: float
// B,b type: float
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_FP32 || GxB_NO_GT_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__gt_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__gt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__gt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__gt_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__gt_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__gt_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__gt_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
graph.h | #pragma once
#include "util/timer.h"
#include <sys/time.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unordered_map>
#include <vector>
#include <chrono>
#include <fstream>
#include "util/util.h"
#include "util/log/log.h"
using namespace std;
typedef int vid_t;
//typedef unsigned int eid_t;
typedef size_t eid_t;
typedef struct {
long n;
long m;
vid_t *adj;
eid_t *num_edges;
eid_t *eid;
} graph_t;
//Define an Edge data type
struct Edge {
vid_t u;
vid_t v;
Edge() {
this->u = 0;
this->v = 0;
}
Edge(vid_t u, vid_t v) {
this->u = u;
this->v = v;
}
};
void free_graph(graph_t *g);
void getEidAndEdgeList(graph_t *g, Edge *idToEdge);
template<typename T>
struct Graph {
string dir;
uint32_t nodemax;
T edgemax;
// csr representation
T *node_off;
int *edge_dst;
vector<int> degree;
explicit Graph(char *dir_cstr);
public:
void ReadDegree();
void CheckInputGraph();
void ReadAdjacencyList();
};
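// Usage sketch (illustrative; the directory path is an assumption). After
// construction the CSR arrays can be traversed as:
//     Graph<uint32_t> g(dir);
//     for (auto u = 0u; u < g.nodemax; u++)
//         for (auto off = g.node_off[u]; off < g.node_off[u + 1]; off++) {
//             int v = g.edge_dst[off]; // neighbor of u
//         }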
template<typename T>
Graph<T>::Graph(char *dir_cstr) {
dir = string(dir_cstr);
// zero-initialize edgemax: the file may store fewer bytes than sizeof(T), so the upper bytes must start cleared
edgemax = 0;
ReadDegree();
ReadAdjacencyList();
CheckInputGraph();
}
using namespace std::chrono;
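// b_degree.bin layout, as inferred from the reads below: a 4-byte element
// size, a 4-byte vertex count n, an element-size-byte edge count m, and
// then n 4-byte degrees.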
template<typename T>
void Graph<T>::ReadDegree() {
auto start = high_resolution_clock::now();
ifstream deg_file(dir + string("/b_degree.bin"), ios::binary);
int int_size = 0;
deg_file.read(reinterpret_cast<char *>(&int_size), 4);
if (int_size != sizeof(T)) {
log_warn("int_size != sizeof(T), %d, %d", int_size, sizeof(T));
}
deg_file.read(reinterpret_cast<char *>(&nodemax), sizeof(int));
deg_file.read(reinterpret_cast<char *>(&edgemax), int_size);
log_info("int size: %d, n: %s, m: %s", int_size, FormatWithCommas(nodemax).c_str(),
FormatWithCommas(edgemax).c_str());
degree.resize(static_cast<unsigned long>(nodemax));
deg_file.read(reinterpret_cast<char *>(&degree.front()), sizeof(int) * nodemax);
auto end = high_resolution_clock::now();
log_info("read degree file time: %.3lf s", duration_cast<milliseconds>(end - start).count() / 1000.0);
}
template<typename T>
void Graph<T>::ReadAdjacencyList() {
auto start = high_resolution_clock::now();
ifstream adj_file(dir + string("/b_adj.bin"), ios::binary);
// csr representation
node_off = (T *) malloc(sizeof(T) * (nodemax + 1));
edge_dst = static_cast<int *>(malloc(sizeof(int) * static_cast<uint64_t>(edgemax + 16)));
string dst_v_file_name = dir + string("/b_adj.bin");
auto dst_v_fd = open(dst_v_file_name.c_str(), O_RDONLY, S_IRUSR | S_IWUSR);
int *buffer = (int *) mmap(0, static_cast<uint64_t >(edgemax) * 4u, PROT_READ, MAP_PRIVATE, dst_v_fd, 0);
// prefix sum
node_off[0] = 0;
for (auto i = 0u; i < nodemax; i++) { node_off[i + 1] = node_off[i] + degree[i]; }
auto end = high_resolution_clock::now();
log_info("malloc, and sequential-scan time: %.3lf s", duration_cast<milliseconds>(end - start).count() / 1000.0);
// load dst vertices into the array
#pragma omp parallel for schedule(dynamic, 1000)
for (auto i = 0u; i < nodemax; i++) {
// copy to the high memory bandwidth mem
for (uint64_t offset = node_off[i]; offset < node_off[i + 1]; offset++) {
edge_dst[offset] = buffer[offset];
}
// inclusive
degree[i]++;
}
munmap(buffer, static_cast<uint64_t >(edgemax) * 4u);
#ifdef VERIFY_INPUT
// Verify.
#pragma omp parallel for schedule(dynamic, 1000)
for (auto u = 0u; u < nodemax; u++) {
for (size_t offset = node_off[u]; offset < node_off[u + 1]; offset++) {
auto v = edge_dst[offset];
if (BranchFreeBinarySearch(edge_dst, node_off[v], node_off[v + 1], (int) u) == node_off[v + 1]) {
log_fatal("CSR not correct...");
exit(-1);
}
}
}
log_info("CSR verify pass");
#endif
auto end2 = high_resolution_clock::now();
log_info("read adjacency list file time: %.3lf s", duration_cast<milliseconds>(end2 - end).count() / 1000.0);
}
template<typename T>
void Graph<T>::CheckInputGraph() {
auto start = high_resolution_clock::now();
#pragma omp parallel for schedule(dynamic, 5000)
for (auto i = 0u; i < nodemax; i++) {
for (auto j = node_off[i]; j < node_off[i + 1]; j++) {
if (edge_dst[j] == static_cast<int>(i)) {
log_error("Self loop of v: %d", i);
exit(1);
}
if (j > node_off[i] && edge_dst[j] <= edge_dst[j - 1]) {
log_error("Edges not sorted in increasing id order!\nThe program may not run properly!");
exit(1);
}
}
}
auto end = high_resolution_clock::now();
log_info("check input graph file time: %.3lf s", duration_cast<milliseconds>(end - start).count() / 1000.0);
}
double timer();
|
GB_reduce_to_scalar_template.c | //------------------------------------------------------------------------------
// GB_reduce_to_scalar_template: s=reduce(A), reduce a matrix to a scalar
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Reduce a matrix to a scalar, with typecasting and generic operators.
// No panel is used.
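// Sketch of the two paths below (a restatement, for reference): a single
// thread performs a serial fold,
//     s = Ax [0] ; for p = 1..anz-1 : s = op (s, Ax [p])
// while the parallel path folds each slice into W [tid] and then a single
// thread combines the ntasks partial results with the same op.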
{
const GB_ATYPE *restrict Ax = A->x ;
int64_t anz = GB_NNZ (A) ;
ASSERT (anz > 0) ;
if (nthreads == 1)
{
//----------------------------------------------------------------------
// single thread
//----------------------------------------------------------------------
// s = (ztype) Ax [0]
GB_CAST_ARRAY_TO_SCALAR (s, Ax, 0) ;
for (int64_t p = 1 ; p < anz ; p++)
{
// check for early exit
GB_BREAK_IF_TERMINAL (s) ;
// s = op (s, (ztype) Ax [p])
GB_ADD_CAST_ARRAY_TO_SCALAR (s, Ax, p) ;
}
}
else
{
//----------------------------------------------------------------------
// create workspace for multiple threads
//----------------------------------------------------------------------
// ztype W [ntasks] ;
GB_REDUCTION_WORKSPACE (W, ntasks) ;
ASSERT (ntasks <= anz) ;
bool early_exit = false ;
//----------------------------------------------------------------------
// each thread reduces its own slice in parallel
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (int tid = 0 ; tid < ntasks ; tid++)
{
int64_t pstart, pend ;
GB_PARTITION (pstart, pend, anz, tid, ntasks) ;
// ztype t = (ztype) Ax [pstart], with typecast
GB_SCALAR (t) ;
GB_CAST_ARRAY_TO_SCALAR (t, Ax, pstart) ;
GB_IF_NOT_EARLY_EXIT
{
for (int64_t p = pstart+1 ; p < pend ; p++)
{
// check for early exit
GB_PARALLEL_BREAK_IF_TERMINAL (t) ;
// t = op (t, (ztype) Ax [p]), with typecast
GB_ADD_CAST_ARRAY_TO_SCALAR (t, Ax, p) ;
}
}
// W [tid] = t, no typecast
GB_COPY_SCALAR_TO_ARRAY (W, tid, t) ;
}
//----------------------------------------------------------------------
// sum up the results of each slice using a single thread
//----------------------------------------------------------------------
// s = W [0], no typecast
GB_COPY_ARRAY_TO_SCALAR (s, W, 0) ;
for (int tid = 1 ; tid < ntasks ; tid++)
{
// s = op (s, W [tid]), no typecast
GB_ADD_ARRAY_TO_SCALAR (s, W, tid) ;
}
}
}
|
omp_simd_aligned1.c | // Various examples of using simd directives
void foo (int n, double *a, double* b)
{
for (int i=0; i<n; i++)
a[i]=b[i];
}
void foo2 (int n, double *a, double* b)
{
for (int i=0; i<n; i++)
a[i]=b[i];
}
void foo3 (int n, double *a, double* b)
{
int j=0;
for (int i=0; i<n; i++,j++)
{
a[i]=b[i]+j;
}
}
void foo32 (int n, double *a, double* b)
{
int j=0, k=0;
for (int i=0; i<n; i++,j++,k++)
{
a[i]=b[i]+j+k;
}
}
void foo33 (int n, double *a, double* b)
{
int j=0, k=0;
for (int i=0; i<n; i++,j++,k++)
{
a[i]=b[i]+j+k;
}
}
void fooAligned (int n, double *a, double* b)
{
int j=0, k=0;
#pragma omp simd aligned(j,k)
for (int i=0; i<n; i++,j++,k++)
{
a[i]=b[i]+j+k;
}
}
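// Note: the aligned clause normally applies to pointer or array list items,
// optionally with an alignment in bytes. A conforming variant might look
// like this (a sketch, assuming the caller guarantees 64-byte alignment):
void fooAlignedPtrs (int n, double *a, double* b)
{
  int j=0, k=0;
  #pragma omp simd aligned(a,b:64)
  for (int i=0; i<n; i++,j++,k++)
  {
    a[i]=b[i]+j+k;
  }
}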
void fooAligned2 (int n, double *a, double* b)
{
int j=0, k=0;
for (int i=0; i<n; i++,j++,k++)
{
a[i]=b[i]+j+k;
}
}
double work( double *a, double *b, int n )
{
int i;
double tmp, sum;
sum = 0.0;
for (i = 0; i < n; i++) {
tmp = a[i] + b[i];
sum += tmp;
}
return sum;
}
#define N 45
int a[N], b[N], c[N];
void foo4(int i, double* P)
{
int j;
for (i = 0; i < 999; ++i) {
j = P[i];
}
}
void work2( double **a, double **b, double **c, int n )
{
int i, j;
double tmp;
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
tmp = a[i][j] + b[i][j];
c[i][j] = tmp;
}
}
}
void work3( double **a, double **b, double **c, int n )
{
int i, j;
double tmp;
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
tmp = a[i][j] + b[i][j];
c[i][j] = tmp;
}
}
}
// declare simd can show up several times!
float bar(int * p) {
*p = *p +10;
return *p;
}
// declare simd can show up several times!
float bar2(int * p) {
*p = *p +10;
return *p;
}
|
inference_helper.h | /* Copyright 2021 iwatake2222
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef INFERENCE_HELPER_
#define INFERENCE_HELPER_
/* for general */
#include <cstdint>
#include <cmath>
#include <string>
#include <vector>
#include <array>
#include <memory>
class TensorInfo {
public:
enum {
kTensorTypeNone,
kTensorTypeUint8,
kTensorTypeInt8,
kTensorTypeFp32,
kTensorTypeInt32,
kTensorTypeInt64,
};
public:
TensorInfo()
: name("")
, id(-1)
, tensor_type(kTensorTypeNone)
, is_nchw(true)
{}
~TensorInfo() {}
int32_t GetElementNum() const
{
int32_t element_num = 1;
for (const auto& dim : tensor_dims) {
element_num *= dim;
}
return element_num;
}
int32_t GetBatch() const
{
if (tensor_dims.size() <= 0) return -1;
return tensor_dims[0];
}
int32_t GetChannel() const
{
if (is_nchw) {
if (tensor_dims.size() <= 1) return -1;
return tensor_dims[1];
} else {
if (tensor_dims.size() <= 3) return -1;
return tensor_dims[3];
}
}
int32_t GetHeight() const
{
if (is_nchw) {
if (tensor_dims.size() <= 2) return -1;
return tensor_dims[2];
} else {
if (tensor_dims.size() <= 1) return -1;
return tensor_dims[1];
}
}
int32_t GetWidth() const
{
if (is_nchw) {
if (tensor_dims.size() <= 3) return -1;
return tensor_dims[3];
} else {
if (tensor_dims.size() <= 2) return -1;
return tensor_dims[2];
}
}
public:
std::string name; // [In] Set the name of the tensor
int32_t id; // [Out] Do not modify (Used in InferenceHelper)
int32_t tensor_type; // [In] The type of tensor (e.g. kTensorTypeFp32)
std::vector<int32_t> tensor_dims; // InputTensorInfo: [In] The dimensions of the tensor. (If empty at initialization, the size is updated from model info.)
// OutputTensorInfo: [Out] The dimensions of the tensor are set from the model information
bool is_nchw; // [IN] NCHW or NHWC
};
class InputTensorInfo : public TensorInfo {
public:
enum {
kDataTypeImage,
kDataTypeBlobNhwc, // data that has already been preprocessed (color conversion, resize, normalization, etc.)
kDataTypeBlobNchw,
};
public:
InputTensorInfo()
: data(nullptr)
, data_type(kDataTypeImage)
, image_info({ -1, -1, -1, -1, -1, -1, -1, true, false })
, normalize({ 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f })
{}
InputTensorInfo(std::string name_, int32_t tensor_type_, bool is_nchw_ = true)
: InputTensorInfo()
{
name = name_;
tensor_type = tensor_type_;
is_nchw = is_nchw_;
}
~InputTensorInfo() {}
public:
void* data; // [In] Set the pointer to image/blob
int32_t data_type; // [In] Set the type of data (e.g. kDataTypeImage)
struct {
int32_t width;
int32_t height;
int32_t channel;
int32_t crop_x;
int32_t crop_y;
int32_t crop_width;
int32_t crop_height;
bool is_bgr; // used when channel == 3 (true: BGR, false: RGB)
bool swap_color;
} image_info; // [In] used when data_type == kDataTypeImage
struct {
float mean[3];
float norm[3];
} normalize; // [In] used when data_type == kDataTypeImage
};
class OutputTensorInfo : public TensorInfo {
public:
OutputTensorInfo()
: data(nullptr)
, quant({ 1.0f, 0 })
, data_fp32_(nullptr)
{}
OutputTensorInfo(std::string name_, int32_t tensor_type_, bool is_nchw_ = true)
: OutputTensorInfo()
{
name = name_;
tensor_type = tensor_type_;
is_nchw = is_nchw_;
}
~OutputTensorInfo() {
if (data_fp32_ != nullptr) {
delete[] data_fp32_;
}
}
float* GetDataAsFloat() { /* The returned pointer should ideally be const, but a non-const pointer is convenient for creating a cv::Mat */
if (tensor_type == kTensorTypeUint8 || tensor_type == kTensorTypeInt8) {
if (data_fp32_ == nullptr) {
data_fp32_ = new float[GetElementNum()];
}
if (tensor_type == kTensorTypeUint8) {
#pragma omp parallel for
for (int32_t i = 0; i < GetElementNum(); i++) {
const uint8_t* val_uint8 = static_cast<const uint8_t*>(data);
float val_float = (val_uint8[i] - quant.zero_point) * quant.scale;
data_fp32_[i] = val_float;
}
} else {
#pragma omp parallel for
for (int32_t i = 0; i < GetElementNum(); i++) {
const int8_t* val_int8 = static_cast<const int8_t*>(data);
float val_float = (val_int8[i] - quant.zero_point) * quant.scale;
data_fp32_[i] = val_float;
}
}
return data_fp32_;
} else if (tensor_type == kTensorTypeFp32) {
return static_cast<float*>(data);
} else {
return nullptr;
}
}
public:
void* data; // [Out] Pointer to the output data
struct {
float scale;
int32_t zero_point;
} quant; // [Out] Parameters for dequantization (convert uint8 to float)
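// Worked example (illustrative values): with scale = 0.5f and
// zero_point = 128, a raw uint8 value of 130 dequantizes to
// (130 - 128) * 0.5f = 1.0f, matching GetDataAsFloat() above.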
private:
float* data_fp32_;
};
namespace cv {
class Mat;
};
class InferenceHelper {
public:
enum {
kRetOk = 0,
kRetErr = -1,
};
typedef enum {
kOpencv,
kOpencvGpu,
kTensorflowLite,
kTensorflowLiteXnnpack,
kTensorflowLiteGpu,
kTensorflowLiteEdgetpu,
kTensorflowLiteNnapi,
kTensorrt,
kNcnn,
kMnn,
kSnpe,
kArmnn,
kNnabla,
kNnablaCuda,
} HelperType;
public:
static InferenceHelper* Create(const HelperType helper_type);
static void PreProcessByOpenCV(const InputTensorInfo& input_tensor_info, bool is_nchw, cv::Mat& img_blob); // use this if the selected inference engine doesn't support pre-process
public:
virtual ~InferenceHelper() {}
virtual int32_t SetNumThreads(const int32_t num_threads) = 0;
virtual int32_t SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops) = 0;
virtual int32_t Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) = 0;
virtual int32_t Finalize(void) = 0;
virtual int32_t PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list) = 0;
virtual int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) = 0;
protected:
void ConvertNormalizeParameters(InputTensorInfo& tensor_info);
void PreProcessImage(int32_t num_thread, const InputTensorInfo& input_tensor_info, float* dst);
void PreProcessImage(int32_t num_thread, const InputTensorInfo& input_tensor_info, uint8_t* dst);
void PreProcessImage(int32_t num_thread, const InputTensorInfo& input_tensor_info, int8_t* dst);
template<typename T>
void PreProcessBlob(int32_t num_thread, const InputTensorInfo& input_tensor_info, T *dst);
protected:
HelperType helper_type_;
};
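/* Usage sketch (illustrative only; the engine type, tensor names, and model
   filename below are assumptions, not part of this header):
     InferenceHelper* helper = InferenceHelper::Create(InferenceHelper::kTensorflowLite);
     std::vector<InputTensorInfo> in_list = { InputTensorInfo("input", TensorInfo::kTensorTypeFp32) };
     std::vector<OutputTensorInfo> out_list = { OutputTensorInfo("output", TensorInfo::kTensorTypeFp32) };
     helper->SetNumThreads(4);
     helper->Initialize("model.tflite", in_list, out_list);
     helper->PreProcess(in_list);
     helper->Process(out_list);
     float* result = out_list[0].GetDataAsFloat();
     helper->Finalize();
*/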
#endif
|
target_data_messages.c | // RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify -fopenmp -ferror-limit 100 -o - %s
void foo() { }
int main(int argc, char **argv) {
L1:
foo();
#pragma omp target data
{
foo();
goto L1; // expected-error {{use of undeclared label 'L1'}}
}
goto L2; // expected-error {{use of undeclared label 'L2'}}
#pragma omp target data
L2:
foo();
#pragma omp target data(i) // expected-warning {{extra tokens at the end of '#pragma omp target data' are ignored}}
{
foo();
}
#pragma omp target unknown // expected-warning {{extra tokens at the end of '#pragma omp target' are ignored}}
{
foo();
}
return 0;
}
|
GB_binop__pair_fc64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_fc64)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_fc64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// A pattern? 1
// B type: GxB_FC64_t
// B pattern? 1
// BinaryOp: cij = GxB_CMPLX(1,0)
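// Note: the PAIR operator ignores the values of A and B entirely (hence the
// "A pattern? 1" and "B pattern? 1" flags above) and writes the complex
// constant 1+0i wherever the result has an entry.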
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GxB_CMPLX(1,0) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_FC64 || GxB_NO_PAIR_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__pair_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__pair_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__pair_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__pair_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC64_t alpha_scalar ;
GxB_FC64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = GxB_CMPLX(1,0) ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = GxB_CMPLX(1,0) ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = GxB_CMPLX(1,0) ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = GxB_CMPLX(1,0) ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
GB_unaryop__identity_uint8_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint8_fp64
// op(A') function: GB_tran__identity_uint8_fp64
// C type: uint8_t
// A type: double
// cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8)
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z ; GB_CAST_UNSIGNED(z,aij,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_uint8_fp64
(
uint8_t *Cx, // Cx and Ax may be aliased
double *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_uint8_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
trsm_x_coo_n_lo_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
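/*
 * Forward substitution for a lower-triangular matrix stored in COO format,
 * solved independently for each column of the right-hand side
 * (a restatement of the loops below, for reference):
 *     y[r] = (alpha * x[r] - sum_{c < r} A[r][c] * y[c]) / A[r][r]
 */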
ALPHA_INT m = A->rows;
ALPHA_Number diag[m];
memset(diag, '\0', m * sizeof(ALPHA_Number));
int num_thread = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for (ALPHA_INT r = 0; r < A->nnz; r++)
{
if(A->row_indx[r] == A->col_indx[r])
{
diag[A->row_indx[r]] = A->values[r];
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
{
for (ALPHA_INT r = 0; r < m; r++)
{
ALPHA_Number temp;
alpha_setzero(temp);
for (ALPHA_INT cr = 0; cr < A->nnz; cr++)
{
int row = A->row_indx[cr];
int col = A->col_indx[cr];
if(row == r && col < r)
alpha_madde(temp, A->values[cr], y[out_y_col * ldy + col]);
}
ALPHA_Number t;
alpha_mul(t, alpha, x[out_y_col * ldx + r]);
alpha_sub(t, t, temp);
alpha_div(y[out_y_col * ldy + r], t, diag[r]);
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
fig4.85-nested-parallel.c | /*
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
Copyright 2009 Sun Microsystems, Inc. All rights reserved.
The contents of this file are subject to the terms of the BSD License("BSD")(the "License").
You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt
The BSD License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistribution of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistribution in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Sun Microsystems, Inc. or the names of
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
This software is provided "AS IS," without a warranty of any kind. ALL
EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND
ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A
RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES.
IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT
OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR
PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS
BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
You acknowledge that this software is not designed, licensed or intended for
use in the design, construction, operation or maintenance of any nuclear facility.
*/
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#define TRUE 1
#define FALSE 0
#else
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#define omp_get_nested() 0
#endif
int main()
{
#ifdef _OPENMP
(void) omp_set_dynamic(FALSE);
if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");}
(void) omp_set_num_threads(3);
(void) omp_set_nested(TRUE);
if (! omp_get_nested()) {printf("Warning: nested parallelism not set\n");}
#endif
printf("Nested parallelism is %s\n",
omp_get_nested() ? "supported" : "not supported");
/*
------------------------------------------------------------------------
Inside the parallel region we can no longer distinguish between the
threads
------------------------------------------------------------------------
*/
#pragma omp parallel
{
printf("Thread %d executes the outer parallel region\n",
omp_get_thread_num());
#pragma omp parallel num_threads(2)
{
printf(" Thread %d executes the inner parallel region\n",
omp_get_thread_num());
} /*-- End of inner parallel region --*/
} /*-- End of outer parallel region --*/
return(0);
}
|
fc_kernel_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: haoluo@openailab.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include <arm_neon.h>
#include "fc_kernel_arm.h"
#include <sys/time.h>
#ifdef __aarch64__
void sgemv_1x8_a72(float* biases, float* input, float* kernel, long kernel_size, float* output);
void sgemv_1x2_a72(float* biases, float* input, float* kernel, long kernel_size, float* output);
#else
void sgemv_1x8_a17(float* biases, float* input, float* kernel, int kernel_size, float* output);
void sgemv_1x2_a17(float* biases, float* input, float* kernel, int kernel_size, float* output);
#endif
typedef void (*kernel_t)(float* biases, float* input, float* kernel, int kernel_size, float* output);
static void sgemv1x8(float* input, float* output, float* kernel, float* bias, int kernel_size, int start_ch, int end_ch,
int num_thread, kernel_t kernel_1x8)
{
#pragma omp parallel for num_threads(num_thread)
for (int ch = start_ch; ch < end_ch; ch += 8)
{
float* cur_kernel = kernel + ch * kernel_size;
float* cur_output = output + ch;
float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
float* cur_bias = bias ? bias + ch : zeros;
kernel_1x8(cur_bias, input, cur_kernel, kernel_size, cur_output);
}
}
static void sgemv1x2(float* input, float* output, float* kernel, float* bias, int kernel_size, int start_ch, int end_ch,
int num_thread, kernel_t kernel_1x2)
{
int end_ch2 = end_ch & -2;
#pragma omp parallel for num_threads(num_thread)
for (int ch = start_ch; ch < end_ch2; ch += 2)
{
float* cur_kernel = kernel + ch * kernel_size;
float* cur_output = output + ch;
float zeros[2] = {0.f, 0.f};
float* cur_bias = bias ? bias + ch : zeros;
kernel_1x2(cur_bias, input, cur_kernel, kernel_size, cur_output);
}
int ch = end_ch2;
if (end_ch & 0x1)
{
float* cur_kernel = kernel + end_ch2 * kernel_size;
float* cur_output = output + end_ch2;
float sum = bias ? bias[ch] : 0.f;
for (int i = 0; i < kernel_size; i++)
sum += input[i] * cur_kernel[i];
*cur_output = sum;
}
}
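/* Weight layout produced by interleave_kernel() below: for each block of 8
 * output channels the weights are stored k-major, so offset 8*k+j holds
 * kernel (ch+j)'s k-th value (presumably to match the 1x8 sgemv kernels'
 * vector loads). Blocks of 2 channels and the final odd channel follow the
 * same scheme. */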
static void interleave_kernel(const float* kernel, float* kernel_interleaved, int out_chan, int kernel_size)
{
int i, j, k;
float* cur_kernel[8];
float* cur_kernel_interleaved;
// interleave 8 kernel
for (i = 0; i < (out_chan & -8); i += 8)
{
for (j = 0; j < 8; j++)
cur_kernel[j] = ( float* )kernel + kernel_size * (i + j);
cur_kernel_interleaved = ( float* )kernel_interleaved + kernel_size * i;
for (k = 0; k < kernel_size; k++)
for (j = 0; j < 8; j++)
cur_kernel_interleaved[8 * k + j] = *(cur_kernel[j] + k);
}
// interleave 2 kernel
for (; i < (out_chan & -2); i += 2)
{
for (j = 0; j < 2; j++)
cur_kernel[j] = ( float* )kernel + kernel_size * (i + j);
cur_kernel_interleaved = ( float* )kernel_interleaved + kernel_size * i;
for (k = 0; k < kernel_size; k++)
for (j = 0; j < 2; j++)
cur_kernel_interleaved[2 * k + j] = *(cur_kernel[j] + k);
}
// copy last kernel
if (out_chan & 0x1)
{
cur_kernel[0] = ( float* )kernel + kernel_size * i;
cur_kernel_interleaved = ( float* )kernel_interleaved + kernel_size * i;
for (k = 0; k < kernel_size; k++)
cur_kernel_interleaved[k] = *(cur_kernel[0] + k);
}
}
int fc_kernel_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor,
struct fc_priv_info* priv_info, struct fc_param* param)
{
int num_output = param->num_output;
int kernel_size = filter_tensor->dims[1];
if (!priv_info->interleave_buffer)
{
int elemsize = input_tensor->elem_size;
int mem_size = elemsize * num_output * kernel_size;
void* mem = sys_malloc(mem_size);
priv_info->interleave_buffer = mem;
priv_info->interleave_buffer_size = mem_size;
}
float* filter_data = ( float* )filter_tensor->data;
interleave_kernel(filter_data, ( float* )priv_info->interleave_buffer, num_output, kernel_size);
return 0;
}
int fc_kernel_postrun(struct fc_priv_info* priv_info)
{
if (priv_info->interleave_buffer != NULL)
{
sys_free(priv_info->interleave_buffer);
priv_info->interleave_buffer = NULL;
priv_info->interleave_buffer_size = 0;
}
if (priv_info->input_buffer != NULL)
{
sys_free(priv_info->input_buffer);
priv_info->input_buffer = NULL;
priv_info->input_buffer_size = 0;
}
return 0;
}
int fc_kernel_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
struct ir_tensor* output_tensor, struct fc_priv_info* priv_info, struct fc_param* param,
int num_thread, int cpu_affinity)
{
int out_num = param->num_output;
int kernel_size = filter_tensor->dims[1];
float* input = input_tensor->data;
float* output = output_tensor->data;
float* biases = NULL;
if (bias_tensor)
biases = bias_tensor->data;
float* weight = priv_info->interleave_buffer;
int remain_out_start = (out_num >> 3) << 3;
/* select the sgemv kernels for this architecture (cpu_affinity is not used here) */
kernel_t kernel_1x8;
kernel_t kernel_1x2;
#ifdef __aarch64__
kernel_1x8 = (kernel_t)sgemv_1x8_a72;
kernel_1x2 = (kernel_t)sgemv_1x2_a72;
#else
kernel_1x8 = (kernel_t)sgemv_1x8_a17;
kernel_1x2 = (kernel_t)sgemv_1x2_a17;
#endif
/* process */
for (int i = 0; i < input_tensor->dims[0]; i++)
{
float* cur_input = input + i * kernel_size;
float* cur_output = output + i * out_num;
sgemv1x8(cur_input, cur_output, weight, biases, kernel_size, 0, remain_out_start, num_thread, kernel_1x8);
if (out_num & 0x7)
sgemv1x2(cur_input, cur_output, weight, biases, kernel_size, remain_out_start, out_num, num_thread, kernel_1x2);
}
return 0;
}
|
taskwait.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
#include "callback.h"
#include <omp.h>
int main()
{
int x = 0;
#pragma omp parallel num_threads(2)
{
#pragma omp master
{
#pragma omp task
{
x++;
}
#pragma omp taskwait
print_current_address(1);
}
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region_wait'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_taskwait_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_taskwait_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_taskwait_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]]
// CHECK-NEXT: {{^}}[[MASTER_ID]]: ompt_event_taskwait_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]]
// CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]]
return 0;
}
|
IcgMask.c | // Copyright (C) 2016 Gernot Riegler
// Institute for Computer Graphics and Vision (ICG)
// Graz University of Technology (TU GRAZ)
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. All advertising materials mentioning features or use of this software
// must display the following acknowledgement:
// This product includes software developed by the ICG, TU GRAZ.
// 4. Neither the name of the ICG, TU GRAZ nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE PROVIDER BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/IcgMask.c"
#else
static void icgnn_(IcgMask_updateOutputGrid)(lua_State* L, THTensor* in,
THTensor* out, real mask_value) {
int height_factor = luaT_getfieldcheckint(L, 1, "height_factor");
int width_factor = luaT_getfieldcheckint(L, 1, "width_factor");
long n_dim = in->nDimension;
luaL_argcheck(L, n_dim == 3 || n_dim == 4, 2, "3D or 4D(batch mode) tensor expected");
in = THTensor_(newContiguous)(in);
long num, channels, height, width;
if(n_dim == 3) {
num = 1;
channels = in->size[0];
height = in->size[1];
width = in->size[2];
THTensor_(resize3d)(out, channels, height, width);
}
else if(n_dim == 4) {
num = in->size[0];
channels = in->size[1];
height = in->size[2];
width = in->size[3];
THTensor_(resize4d)(out, num, channels, height, width);
}
real* in_data = THTensor_(data)(in);
real* out_data = THTensor_(data)(out);
long h_offset = (height_factor - 1) / 2;
long w_offset = (width_factor - 1) / 2;
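// e.g. height_factor = width_factor = 4 gives h_offset = w_offset = 1, so
// only pixels with h % 4 == 1 && w % 4 == 1 keep their input value; all
// others are set to mask_value (illustrative factors).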
long n;
#pragma omp parallel for private(n)
for(n = 0; n < num * channels; ++n) {
long h;
for(h = 0; h < height; ++h) {
long w;
for(w = 0; w < width; ++w) {
long idx = (n * height + h) * width + w;
if(h % height_factor == h_offset && w % width_factor == w_offset) {
out_data[idx] = in_data[idx];
}
else {
out_data[idx] = mask_value;
}
}
}
}
THTensor_(free)(in);
}
static void icgnn_(IcgMask_updateOutputBorder)(lua_State* L, THTensor* in,
THTensor* out, real mask_value) {
int border = luaT_getfieldcheckint(L, 1, "border");
long n_dim = in->nDimension;
luaL_argcheck(L, n_dim == 3 || n_dim == 4, 2, "3D or 4D(batch mode) tensor expected");
in = THTensor_(newContiguous)(in);
long num, channels, height, width;
if(n_dim == 3) {
num = 1;
channels = in->size[0];
height = in->size[1];
width = in->size[2];
THTensor_(resize3d)(out, channels, height, width);
}
else if(n_dim == 4) {
num = in->size[0];
channels = in->size[1];
height = in->size[2];
width = in->size[3];
THTensor_(resize4d)(out, num, channels, height, width);
}
real* in_data = THTensor_(data)(in);
real* out_data = THTensor_(data)(out);
long n;
#pragma omp parallel for private(n)
for(n = 0; n < num * channels; ++n) {
long h;
for(h = 0; h < height; ++h) {
long w;
for(w = 0; w < width; ++w) {
long idx = (n * height + h) * width + w;
if(h >= border && h < height - border && w >= border && w < width - border) {
out_data[idx] = in_data[idx];
}
else {
out_data[idx] = mask_value;
}
}
}
}
THTensor_(free)(in);
}
static int icgnn_(IcgMask_updateOutput)(lua_State* L) {
THTensor* in = luaT_checkudata(L, 2, torch_Tensor);
THTensor* out = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
const char* mask_type = luaT_getfieldcheckstring(L, 1, "mask_type");
real mask_value = luaT_getfieldchecknumber(L, 1, "mask_value");
if(strcmp(mask_type, "grid") == 0) {
icgnn_(IcgMask_updateOutputGrid)(L, in, out, mask_value);
}
else if(strcmp(mask_type, "border") == 0) {
icgnn_(IcgMask_updateOutputBorder)(L, in, out, mask_value);
}
else {
luaL_error(L, "unknown mask type: %s", mask_type);
}
return 1;
}
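/* Backward pass, grid mode: the forward op is the identity on kept grid
positions, so the gradient passes through there; masked positions were set
to the constant mask_value and therefore receive zero gradient. */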
static void icgnn_(IcgMask_updateGradInputGrid)(lua_State* L, THTensor* in,
THTensor* grad_out, THTensor* out, THTensor* grad_in, real mask_value) {
int height_factor = luaT_getfieldcheckint(L, 1, "height_factor");
int width_factor = luaT_getfieldcheckint(L, 1, "width_factor");
real* grad_in_data = THTensor_(data)(grad_in);
real* grad_out_data = THTensor_(data)(grad_out);
long n_dim = in->nDimension;
long num, channels, height, width;
if(n_dim == 3) {
num = 1;
channels = in->size[0];
height = in->size[1];
width = in->size[2];
}
else if(n_dim == 4) {
num = in->size[0];
channels = in->size[1];
height = in->size[2];
width = in->size[3];
}
else {
luaL_error(L, "3D or 4D(batch mode) tensor expected");
}
long h_offset = (height_factor - 1) / 2;
long w_offset = (width_factor - 1) / 2;
long n;
#pragma omp parallel for private(n)
for(n = 0; n < num * channels; ++n) {
long h;
for(h = 0; h < height; ++h) {
long w;
for(w = 0; w < width; ++w) {
long idx = (n * height + h) * width + w;
if(h % height_factor == h_offset && w % width_factor == w_offset) {
grad_in_data[idx] = grad_out_data[idx];
}
else {
grad_in_data[idx] = 0;
}
}
}
}
}
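/* Backward pass, border mode: gradient passes through on the interior and
is zero on the masked border frame. */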
static void icgnn_(IcgMask_updateGradInputBorder)(lua_State* L, THTensor* in,
THTensor* grad_out, THTensor* out, THTensor* grad_in, real mask_value) {
int border = luaT_getfieldcheckint(L, 1, "border");
real* grad_in_data = THTensor_(data)(grad_in);
real* grad_out_data = THTensor_(data)(grad_out);
long n_dim = in->nDimension;
long num, channels, height, width;
if(n_dim == 3) {
num = 1;
channels = in->size[0];
height = in->size[1];
width = in->size[2];
}
else if(n_dim == 4) {
num = in->size[0];
channels = in->size[1];
height = in->size[2];
width = in->size[3];
}
else {
luaL_error(L, "3D or 4D(batch mode) tensor expected");
}
long n;
#pragma omp parallel for private(n)
for(n = 0; n < num * channels; ++n) {
long h;
for(h = 0; h < height; ++h) {
long w;
for(w = 0; w < width; ++w) {
long idx = (n * height + h) * width + w;
if(h >= border && h < height - border && w >= border && w < width - border) {
grad_in_data[idx] = grad_out_data[idx];
}
else {
grad_in_data[idx] = 0;
}
}
}
}
}
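/* Lua entry point for the backward pass: resizes gradInput to match the
input, then dispatches on mask_type. */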
static int icgnn_(IcgMask_updateGradInput)(lua_State *L) {
THTensor* in = luaT_checkudata(L, 2, torch_Tensor);
THTensor* grad_out = luaT_checkudata(L, 3, torch_Tensor);
THTensor* out = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
THTensor* grad_in = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);
THTensor_(resizeAs)(grad_in, in);
const char* mask_type = luaT_getfieldcheckstring(L, 1, "mask_type");
real mask_value = luaT_getfieldchecknumber(L, 1, "mask_value");
if(strcmp(mask_type, "grid") == 0) {
icgnn_(IcgMask_updateGradInputGrid)(L, in, grad_out, out, grad_in, mask_value);
}
else if(strcmp(mask_type, "border") == 0) {
icgnn_(IcgMask_updateGradInputBorder)(L, in, grad_out, out, grad_in, mask_value);
}
else {
luaL_error(L, "unknown mask type: %s", mask_type);
}
return 1;
}
static const struct luaL_Reg icgnn_(IcgMask__) [] = {
{"IcgMask_updateOutput", icgnn_(IcgMask_updateOutput)},
{"IcgMask_updateGradInput", icgnn_(IcgMask_updateGradInput)},
{NULL, NULL}
};
static void icgnn_(IcgMask_init)(lua_State *L)
{
luaT_pushmetatable(L, torch_Tensor);
luaT_registeratname(L, icgnn_(IcgMask__), "icgnn");
lua_pop(L,1);
}
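/* Hypothetical Lua usage sketch (the nn-style icgnn.IcgMask wrapper and its
constructor signature are assumptions, not defined in this file):
local m = icgnn.IcgMask('grid', 0) -- mask_type, mask_value
m.height_factor, m.width_factor = 2, 2
local out = m:updateOutput(torch.rand(3, 8, 8))
local grad = m:updateGradInput(input, gradOutput) */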
#endif
|
cps_dp.c | /* Generated by Cython 0.29.24 */
/* BEGIN: Cython Metadata
{
"distutils": {
"depends": [
"/home/lothar/.local/lib/python3.8/site-packages/numpy/core/include/numpy/arrayobject.h",
"/home/lothar/.local/lib/python3.8/site-packages/numpy/core/include/numpy/arrayscalars.h",
"/home/lothar/.local/lib/python3.8/site-packages/numpy/core/include/numpy/ndarrayobject.h",
"/home/lothar/.local/lib/python3.8/site-packages/numpy/core/include/numpy/ndarraytypes.h",
"/home/lothar/.local/lib/python3.8/site-packages/numpy/core/include/numpy/ufuncobject.h"
],
"extra_compile_args": [
"-fopenmp"
],
"extra_link_args": [
"-fopenmp"
],
"include_dirs": [
"/home/lothar/.local/lib/python3.8/site-packages/numpy/core/include"
],
"name": "fairseq.cps_dp",
"sources": [
"fairseq/cps_dp.pyx"
]
},
"module_name": "fairseq.cps_dp"
}
END: Cython Metadata */
#ifndef PY_SSIZE_T_CLEAN
#define PY_SSIZE_T_CLEAN
#endif /* PY_SSIZE_T_CLEAN */
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_24"
#define CYTHON_HEX_VERSION 0x001D18F0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
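/* Compatibility shim: CPython gained the Py_tss_t thread-specific-storage
API in 3.7; on older versions the block below emulates it on top of the
legacy PyThread_*_key functions. */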
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#if defined(PyUnicode_IS_READY)
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#else
#define __Pyx_PyUnicode_READY(op) (0)
#endif
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length))
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#endif
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
#endif
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#ifndef PyObject_Unicode
#define PyObject_Unicode PyObject_Str
#endif
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
#define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
#else
#define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
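/* Fallback NaN: if <math.h> does not define NAN, filling every byte of a
float with 0xFF produces a quiet IEEE-754 NaN. */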
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_MARK_ERR_POS(f_index, lineno) \
{ __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__fairseq__cps_dp
#define __PYX_HAVE_API__fairseq__cps_dp
/* Early includes */
#include <math.h>
#include <string.h>
#include <stdio.h>
#include "numpy/arrayobject.h"
#include "numpy/ndarrayobject.h"
#include "numpy/ndarraytypes.h"
#include "numpy/arrayscalars.h"
#include "numpy/ufuncobject.h"
/* NumPy API declarations from "numpy/__init__.pxd" */
#include "math.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
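/* Single-comparison bounds check: a negative i casts to a huge size_t, so
(size_t)i < (size_t)limit tests 0 <= i < limit in one compare. */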
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
return (size_t) i < (size_t) limit;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
/* Header.proto */
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"fairseq/cps_dp.pyx",
"__init__.pxd",
"stringsource",
"type.pxd",
};
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":690
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":691
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":692
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":693
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":697
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":698
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":699
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":700
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":704
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":705
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":714
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":715
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":716
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":718
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":719
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":720
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":722
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":723
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":725
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":726
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":727
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
/* "fairseq/cps_dp.pyx":16
* float INFINITY
*
* ctypedef np.float64_t DTYPE_t # <<<<<<<<<<<<<<
* ctypedef np.int64_t DTYPE_int_t
*
*/
typedef __pyx_t_5numpy_float64_t __pyx_t_7fairseq_6cps_dp_DTYPE_t;
/* "fairseq/cps_dp.pyx":17
*
* ctypedef np.float64_t DTYPE_t
* ctypedef np.int64_t DTYPE_int_t # <<<<<<<<<<<<<<
*
* @cython.boundscheck(False)
*/
typedef __pyx_t_5numpy_int64_t __pyx_t_7fairseq_6cps_dp_DTYPE_int_t;
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
/*--- Type declarations ---*/
struct __pyx_obj___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":729
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":730
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":731
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":733
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
/* "cfunc.to_py":64
*
* @cname("__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py")
* cdef object __Pyx_CFunc_DTYPE__t____DTYPE__t___to_py(DTYPE_t (*f)(DTYPE_t) except *): # <<<<<<<<<<<<<<
* def wrap(DTYPE_t x):
* """wrap(x: 'DTYPE_t') -> 'DTYPE_t'"""
*/
struct __pyx_obj___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py {
PyObject_HEAD
__pyx_t_7fairseq_6cps_dp_DTYPE_t (*__pyx_v_f)(__pyx_t_7fairseq_6cps_dp_DTYPE_t);
};
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
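/* RefNanny is Cython's optional refcount-debugging layer; with
CYTHON_REFNANNY disabled (the default) the macros below reduce to plain
Py_INCREF/Py_DECREF or no-ops. */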
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* BufferGetAndValidate.proto */
#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\
((obj == Py_None || obj == NULL) ?\
(__Pyx_ZeroBuffer(buf), 0) :\
__Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack))
static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static void __Pyx_ZeroBuffer(Py_buffer* buf);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* BufferFallbackError.proto */
static void __Pyx_RaiseBufferFallbackError(void);
#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1)
#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0)
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
(sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
#define __Pxy_PyFrame_Initialize_Offsets()\
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame)\
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* PyObjectCallNoArg.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
#endif
/* ListAppend.proto */
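/* Fast path: if the list still has spare capacity (and is more than half
full, so the resize heuristics stay untouched), store the item directly and
bump the size instead of calling PyList_Append. */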
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
__Pyx_SET_SIZE(list, len + 1);
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
/* PyIntCompare.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, long intval, long inplace);
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* IterFinish.proto */
static CYTHON_INLINE int __Pyx_IterFinish(void);
/* UnpackItemEndCheck.proto */
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected);
/* PySequenceContains.proto */
static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) {
int result = PySequence_Contains(seq, item);
return unlikely(result < 0) ? result : (result == (eq == Py_EQ));
}
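/* One helper serves both membership tests: with eq == Py_EQ the return line
   above yields the truth of "item in seq", with eq == Py_NE the truth of
   "item not in seq"; a negative result from PySequence_Contains is passed
   through unchanged as the error code. */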
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* FetchCommonType.proto */
static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type);
/* CythonFunctionShared.proto */
#define __Pyx_CyFunction_USED 1
#define __Pyx_CYFUNCTION_STATICMETHOD 0x01
#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02
#define __Pyx_CYFUNCTION_CCLASS 0x04
#define __Pyx_CyFunction_GetClosure(f)\
(((__pyx_CyFunctionObject *) (f))->func_closure)
#define __Pyx_CyFunction_GetClassObj(f)\
(((__pyx_CyFunctionObject *) (f))->func_classobj)
#define __Pyx_CyFunction_Defaults(type, f)\
((type *)(((__pyx_CyFunctionObject *) (f))->defaults))
#define __Pyx_CyFunction_SetDefaultsGetter(f, g)\
((__pyx_CyFunctionObject *) (f))->defaults_getter = (g)
typedef struct {
PyCFunctionObject func;
#if PY_VERSION_HEX < 0x030500A0
PyObject *func_weakreflist;
#endif
PyObject *func_dict;
PyObject *func_name;
PyObject *func_qualname;
PyObject *func_doc;
PyObject *func_globals;
PyObject *func_code;
PyObject *func_closure;
PyObject *func_classobj;
void *defaults;
int defaults_pyobjects;
size_t defaults_size; /* used by FusedFunction for copying defaults */
int flags;
PyObject *defaults_tuple;
PyObject *defaults_kwdict;
PyObject *(*defaults_getter)(PyObject *);
PyObject *func_annotations;
} __pyx_CyFunctionObject;
static PyTypeObject *__pyx_CyFunctionType = 0;
#define __Pyx_CyFunction_Check(obj) (__Pyx_TypeCheck(obj, __pyx_CyFunctionType))
static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml,
int flags, PyObject* qualname,
PyObject *self,
PyObject *module, PyObject *globals,
PyObject* code);
static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m,
size_t size,
int pyobjects);
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m,
PyObject *tuple);
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m,
PyObject *dict);
static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m,
PyObject *dict);
static int __pyx_CyFunction_init(void);
/* CythonFunction.proto */
static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml,
int flags, PyObject* qualname,
PyObject *closure,
PyObject *module, PyObject *globals,
PyObject* code);
/* IncludeStringH.proto */
#include <string.h>
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* TypeImport.proto */
#ifndef __PYX_HAVE_RT_ImportType_proto
#define __PYX_HAVE_RT_ImportType_proto
enum __Pyx_ImportType_CheckSize {
__Pyx_ImportType_CheckSize_Error = 0,
__Pyx_ImportType_CheckSize_Warn = 1,
__Pyx_ImportType_CheckSize_Ignore = 2
};
static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* GCCDiagnostics.proto */
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
#define __Pyx_HAS_GCC_DIAGNOSTIC
#endif
/* RealImag.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if defined(__cplusplus) && CYTHON_CCOMPLEX\
&& (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_float(a, b) ((a)==(b))
#define __Pyx_c_sum_float(a, b) ((a)+(b))
#define __Pyx_c_diff_float(a, b) ((a)-(b))
#define __Pyx_c_prod_float(a, b) ((a)*(b))
#define __Pyx_c_quot_float(a, b) ((a)/(b))
#define __Pyx_c_neg_float(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_float(z) ((z)==(float)0)
#define __Pyx_c_conj_float(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_float(z) (::std::abs(z))
#define __Pyx_c_pow_float(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_float(z) ((z)==0)
#define __Pyx_c_conj_float(z) (conjf(z))
#if 1
#define __Pyx_c_abs_float(z) (cabsf(z))
#define __Pyx_c_pow_float(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_double(a, b) ((a)==(b))
#define __Pyx_c_sum_double(a, b) ((a)+(b))
#define __Pyx_c_diff_double(a, b) ((a)-(b))
#define __Pyx_c_prod_double(a, b) ((a)*(b))
#define __Pyx_c_quot_double(a, b) ((a)/(b))
#define __Pyx_c_neg_double(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_double(z) ((z)==(double)0)
#define __Pyx_c_conj_double(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_double(z) (::std::abs(z))
#define __Pyx_c_pow_double(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_double(z) ((z)==0)
#define __Pyx_c_conj_double(z) (conj(z))
#if 1
#define __Pyx_c_abs_double(z) (cabs(z))
#define __Pyx_c_pow_double(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
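/* In the two Arithmetic blocks above, CYTHON_CCOMPLEX routes complex
   arithmetic to the compiler's native support (std::complex in C++, C99
   _Complex otherwise), while the fallback prototypes implement the same
   operations by hand on Cython's struct-based complex types. */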
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int64(npy_int64 value);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* Module declarations from 'libc.math' */
/* Module declarations from 'cython' */
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'cpython' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'cpython.mem' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_generic = 0;
static PyTypeObject *__pyx_ptype_5numpy_number = 0;
static PyTypeObject *__pyx_ptype_5numpy_integer = 0;
static PyTypeObject *__pyx_ptype_5numpy_signedinteger = 0;
static PyTypeObject *__pyx_ptype_5numpy_unsignedinteger = 0;
static PyTypeObject *__pyx_ptype_5numpy_inexact = 0;
static PyTypeObject *__pyx_ptype_5numpy_floating = 0;
static PyTypeObject *__pyx_ptype_5numpy_complexfloating = 0;
static PyTypeObject *__pyx_ptype_5numpy_flexible = 0;
static PyTypeObject *__pyx_ptype_5numpy_character = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
/* Module declarations from 'fairseq.cps_dp' */
static PyTypeObject *__pyx_ptype___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py = 0;
static CYTHON_INLINE __pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_f_7fairseq_6cps_dp_log1mexp(__pyx_t_7fairseq_6cps_dp_DTYPE_t); /*proto*/
static CYTHON_INLINE __pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_f_7fairseq_6cps_dp_log1pexp(__pyx_t_7fairseq_6cps_dp_DTYPE_t); /*proto*/
static CYTHON_INLINE __pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_f_7fairseq_6cps_dp_log_add(__pyx_t_7fairseq_6cps_dp_DTYPE_t, __pyx_t_7fairseq_6cps_dp_DTYPE_t); /*proto*/
static PyObject *__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py(__pyx_t_7fairseq_6cps_dp_DTYPE_t (*)(__pyx_t_7fairseq_6cps_dp_DTYPE_t)); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t = { "DTYPE_t", NULL, sizeof(__pyx_t_7fairseq_6cps_dp_DTYPE_t), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_int_t = { "DTYPE_int_t", NULL, sizeof(__pyx_t_7fairseq_6cps_dp_DTYPE_int_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_7fairseq_6cps_dp_DTYPE_int_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_7fairseq_6cps_dp_DTYPE_int_t), 0 };
#define __Pyx_MODULE_NAME "fairseq.cps_dp"
extern int __pyx_module_is_main_fairseq__cps_dp;
int __pyx_module_is_main_fairseq__cps_dp = 0;
/* Implementation of 'fairseq.cps_dp' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_map;
static PyObject *__pyx_builtin_ImportError;
static const char __pyx_k_b[] = "b";
static const char __pyx_k_i[] = "i";
static const char __pyx_k_j[] = "j";
static const char __pyx_k_k[] = "k";
static const char __pyx_k_n[] = "n";
static const char __pyx_k_p[] = "p";
static const char __pyx_k_r[] = "r";
static const char __pyx_k_x[] = "x";
static const char __pyx_k_dp[] = "dp";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_exp[] = "exp";
static const char __pyx_k_inf[] = "inf";
static const char __pyx_k_log[] = "log";
static const char __pyx_k_low[] = "low";
static const char __pyx_k_map[] = "map";
static const char __pyx_k_ord[] = "ord";
static const char __pyx_k_sum[] = "sum";
static const char __pyx_k_tmp[] = "tmp";
static const char __pyx_k_copy[] = "copy";
static const char __pyx_k_full[] = "full";
static const char __pyx_k_high[] = "high";
static const char __pyx_k_logp[] = "logp";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_name[] = "__name__";
static const char __pyx_k_norm[] = "norm";
static const char __pyx_k_size[] = "size";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_wrap[] = "wrap";
static const char __pyx_k_array[] = "array";
static const char __pyx_k_dtype[] = "dtype";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_append[] = "append";
static const char __pyx_k_astype[] = "astype";
static const char __pyx_k_choice[] = "choice";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_linalg[] = "linalg";
static const char __pyx_k_random[] = "random";
static const char __pyx_k_sample[] = "sample";
static const char __pyx_k_thresh[] = "thresh";
static const char __pyx_k_asarray[] = "asarray";
static const char __pyx_k_counter[] = "counter";
static const char __pyx_k_float64[] = "float64";
static const char __pyx_k_randint[] = "randint";
static const char __pyx_k_uniform[] = "uniform";
static const char __pyx_k_nan_to_num[] = "nan_to_num";
static const char __pyx_k_thresholds[] = "thresholds";
static const char __pyx_k_ImportError[] = "ImportError";
static const char __pyx_k_cfunc_to_py[] = "cfunc.to_py";
static const char __pyx_k_log_weights[] = "log_weights";
static const char __pyx_k_logp_sliced[] = "logp_sliced";
static const char __pyx_k_replacement[] = "replacement";
static const char __pyx_k_samples_idx[] = "samples_idx";
static const char __pyx_k_stringsource[] = "stringsource";
static const char __pyx_k_selected_incs[] = "selected_incs";
static const char __pyx_k_selected_inds[] = "selected_inds";
static const char __pyx_k_fairseq_cps_dp[] = "fairseq.cps_dp";
static const char __pyx_k_to_pick_number[] = "to_pick_number";
static const char __pyx_k_inclusion_probs[] = "inclusion_probs";
static const char __pyx_k_sampford_sample[] = "sampford_sample";
static const char __pyx_k_intermediate_res[] = "intermediate_res";
static const char __pyx_k_log_prob_filtered[] = "log_prob_filtered";
static const char __pyx_k_calc_normalization[] = "calc_normalization";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_fairseq_cps_dp_pyx[] = "fairseq/cps_dp.pyx";
static const char __pyx_k_log_inclusion_probs[] = "log_inclusion_probs";
static const char __pyx_k_calc_log_inclusion_probs[] = "calc_log_inclusion_probs";
static const char __pyx_k_subset_sum_product_probs[] = "subset_sum_product_probs";
static const char __pyx_k_Pyx_CFunc_DTYPE__t____DTYPE__t[] = "__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py.<locals>.wrap";
static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import";
static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import";
static const char __pyx_k_remaining_subsetsum_product_prob[] = "remaining_subsetsum_product_probs";
static PyObject *__pyx_n_s_ImportError;
static PyObject *__pyx_n_s_Pyx_CFunc_DTYPE__t____DTYPE__t;
static PyObject *__pyx_n_s_append;
static PyObject *__pyx_n_s_array;
static PyObject *__pyx_n_s_asarray;
static PyObject *__pyx_n_s_astype;
static PyObject *__pyx_n_s_b;
static PyObject *__pyx_n_s_calc_log_inclusion_probs;
static PyObject *__pyx_n_s_calc_normalization;
static PyObject *__pyx_n_s_cfunc_to_py;
static PyObject *__pyx_n_s_choice;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_n_s_copy;
static PyObject *__pyx_n_s_counter;
static PyObject *__pyx_n_s_dp;
static PyObject *__pyx_n_s_dtype;
static PyObject *__pyx_n_s_exp;
static PyObject *__pyx_n_s_fairseq_cps_dp;
static PyObject *__pyx_kp_s_fairseq_cps_dp_pyx;
static PyObject *__pyx_n_s_float64;
static PyObject *__pyx_n_s_full;
static PyObject *__pyx_n_s_high;
static PyObject *__pyx_n_s_i;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_inclusion_probs;
static PyObject *__pyx_n_s_inf;
static PyObject *__pyx_n_s_intermediate_res;
static PyObject *__pyx_n_s_j;
static PyObject *__pyx_n_s_k;
static PyObject *__pyx_n_s_linalg;
static PyObject *__pyx_n_s_log;
static PyObject *__pyx_n_s_log_inclusion_probs;
static PyObject *__pyx_n_s_log_prob_filtered;
static PyObject *__pyx_n_s_log_weights;
static PyObject *__pyx_n_s_logp;
static PyObject *__pyx_n_s_logp_sliced;
static PyObject *__pyx_n_s_low;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_map;
static PyObject *__pyx_n_s_n;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_nan_to_num;
static PyObject *__pyx_n_s_norm;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to;
static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor;
static PyObject *__pyx_n_s_ord;
static PyObject *__pyx_n_s_p;
static PyObject *__pyx_n_s_r;
static PyObject *__pyx_n_s_randint;
static PyObject *__pyx_n_s_random;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_remaining_subsetsum_product_prob;
static PyObject *__pyx_n_s_replacement;
static PyObject *__pyx_n_s_sampford_sample;
static PyObject *__pyx_n_s_sample;
static PyObject *__pyx_n_s_samples_idx;
static PyObject *__pyx_n_s_selected_incs;
static PyObject *__pyx_n_s_selected_inds;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_kp_s_stringsource;
static PyObject *__pyx_n_s_subset_sum_product_probs;
static PyObject *__pyx_n_s_sum;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_thresh;
static PyObject *__pyx_n_s_thresholds;
static PyObject *__pyx_n_s_tmp;
static PyObject *__pyx_n_s_to_pick_number;
static PyObject *__pyx_n_s_uniform;
static PyObject *__pyx_n_s_wrap;
static PyObject *__pyx_n_s_x;
static PyObject *__pyx_pf_7fairseq_6cps_dp_calc_normalization(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_logp_sliced, int __pyx_v_k); /* proto */
static PyObject *__pyx_pf_7fairseq_6cps_dp_2calc_log_inclusion_probs(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_logp_sliced, PyArrayObject *__pyx_v_subset_sum_product_probs, int __pyx_v_k); /* proto */
static PyObject *__pyx_pf_7fairseq_6cps_dp_4sample(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_logp, PyArrayObject *__pyx_v_selected_inds, int __pyx_v_k); /* proto */
static PyObject *__pyx_pf_7fairseq_6cps_dp_6sampford_sample(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_logp, PyArrayObject *__pyx_v_selected_inds, int __pyx_v_k); /* proto */
static PyObject *__pyx_pf_11cfunc_dot_to_py_40__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py_wrap(PyObject *__pyx_self, __pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_v_x); /* proto */
static PyObject *__pyx_tp_new___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_float_0_;
static PyObject *__pyx_float_0_99;
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_10;
static PyObject *__pyx_int_neg_60;
static PyObject *__pyx_slice_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_codeobj__6;
static PyObject *__pyx_codeobj__8;
static PyObject *__pyx_codeobj__10;
static PyObject *__pyx_codeobj__12;
static PyObject *__pyx_codeobj__14;
/* Late includes */
/* "fairseq/cps_dp.pyx":21
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef inline DTYPE_t log1mexp(DTYPE_t x): # <<<<<<<<<<<<<<
* """
* Numerically stable implementation of log(1-exp(x))
*/
static CYTHON_INLINE __pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_f_7fairseq_6cps_dp_log1mexp(__pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_v_x) {
__pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_v_a;
__pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
__Pyx_RefNannySetupContext("log1mexp", 0);
/* "fairseq/cps_dp.pyx":29
* """
* cdef DTYPE_t a
* if x >= 0: # <<<<<<<<<<<<<<
* return NAN
* else:
*/
__pyx_t_1 = ((__pyx_v_x >= 0.0) != 0);
if (__pyx_t_1) {
/* "fairseq/cps_dp.pyx":30
* cdef DTYPE_t a
* if x >= 0:
* return NAN # <<<<<<<<<<<<<<
* else:
* a = abs(x)
*/
__pyx_r = NAN;
goto __pyx_L0;
/* "fairseq/cps_dp.pyx":29
* """
* cdef DTYPE_t a
* if x >= 0: # <<<<<<<<<<<<<<
* return NAN
* else:
*/
}
/* "fairseq/cps_dp.pyx":32
* return NAN
* else:
* a = abs(x) # <<<<<<<<<<<<<<
* if 0 < a <= 0.693:
* return log(-expm1(-a))
*/
/*else*/ {
__pyx_v_a = fabs(__pyx_v_x);
/* "fairseq/cps_dp.pyx":33
* else:
* a = abs(x)
* if 0 < a <= 0.693: # <<<<<<<<<<<<<<
* return log(-expm1(-a))
* else:
*/
__pyx_t_1 = (0.0 < __pyx_v_a);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_a <= 0.693);
}
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "fairseq/cps_dp.pyx":34
* a = abs(x)
* if 0 < a <= 0.693:
* return log(-expm1(-a)) # <<<<<<<<<<<<<<
* else:
* return log1p(-exp(-a))
*/
__pyx_r = log((-expm1((-__pyx_v_a))));
goto __pyx_L0;
/* "fairseq/cps_dp.pyx":33
* else:
* a = abs(x)
* if 0 < a <= 0.693: # <<<<<<<<<<<<<<
* return log(-expm1(-a))
* else:
*/
}
/* "fairseq/cps_dp.pyx":36
* return log(-expm1(-a))
* else:
* return log1p(-exp(-a)) # <<<<<<<<<<<<<<
*
* @cython.boundscheck(False)
*/
/*else*/ {
__pyx_r = log1p((-exp((-__pyx_v_a))));
goto __pyx_L0;
}
}
/* "fairseq/cps_dp.pyx":21
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef inline DTYPE_t log1mexp(DTYPE_t x): # <<<<<<<<<<<<<<
* """
* Numerically stable implementation of log(1-exp(x))
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
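/* A standalone restatement of the cutoff logic above (a sketch, assuming
   IEEE doubles; ref_log1mexp is not a Cython name). Following the Rmpfr
   log1mexp note cited in the log1pexp docstring below, log(1 - exp(x)) for
   x < 0 is computed with expm1 when |x| <= ln 2 ~ 0.693, where forming
   1 - exp(-|x|) directly would cancel, and with log1p otherwise, where
   exp(-|x|) is already small. Kept under "#if 0" so it never affects the
   generated build. */
#if 0
#include <math.h>
static double ref_log1mexp(double x)
{
    if (x >= 0.0) return NAN;              /* log of a non-positive number  */
    double a = fabs(x);
    return (a <= 0.693) ? log(-expm1(-a))  /* a <= ln 2: avoid cancellation */
                        : log1p(-exp(-a)); /* a  > ln 2: exp(-a) is small   */
}
#endif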
/* "fairseq/cps_dp.pyx":40
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef inline DTYPE_t log1pexp(DTYPE_t x) nogil: # <<<<<<<<<<<<<<
* """
* Numerically stable implementation of log(1+exp(x)), a.k.a. softplus (logaddexp(0, x)).
*/
static CYTHON_INLINE __pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_f_7fairseq_6cps_dp_log1pexp(__pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_v_x) {
__pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
/* "fairseq/cps_dp.pyx":47
* http://cran.r-project.org/web/packages/Rmpfr/vignettes/log1mexp-note.pdf
* """
* if x <= -37.: # <<<<<<<<<<<<<<
* return exp(x)
* elif -37. <= x <= 18.:
*/
__pyx_t_1 = ((__pyx_v_x <= -37.) != 0);
if (__pyx_t_1) {
/* "fairseq/cps_dp.pyx":48
* """
* if x <= -37.:
* return exp(x) # <<<<<<<<<<<<<<
* elif -37. <= x <= 18.:
* return log1p(exp(x))
*/
__pyx_r = exp(__pyx_v_x);
goto __pyx_L0;
/* "fairseq/cps_dp.pyx":47
* http://cran.r-project.org/web/packages/Rmpfr/vignettes/log1mexp-note.pdf
* """
* if x <= -37.: # <<<<<<<<<<<<<<
* return exp(x)
* elif -37. <= x <= 18.:
*/
}
/* "fairseq/cps_dp.pyx":49
* if x <= -37.:
* return exp(x)
* elif -37. <= x <= 18.: # <<<<<<<<<<<<<<
* return log1p(exp(x))
* elif 18. < x <= 33.3:
*/
__pyx_t_1 = (-37. <= __pyx_v_x);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_x <= 18.);
}
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "fairseq/cps_dp.pyx":50
* return exp(x)
* elif -37. <= x <= 18.:
* return log1p(exp(x)) # <<<<<<<<<<<<<<
* elif 18. < x <= 33.3:
* return x + exp(-x)
*/
__pyx_r = log1p(exp(__pyx_v_x));
goto __pyx_L0;
/* "fairseq/cps_dp.pyx":49
* if x <= -37.:
* return exp(x)
* elif -37. <= x <= 18.: # <<<<<<<<<<<<<<
* return log1p(exp(x))
* elif 18. < x <= 33.3:
*/
}
/* "fairseq/cps_dp.pyx":51
* elif -37. <= x <= 18.:
* return log1p(exp(x))
* elif 18. < x <= 33.3: # <<<<<<<<<<<<<<
* return x + exp(-x)
* else:
*/
__pyx_t_2 = (18. < __pyx_v_x);
if (__pyx_t_2) {
__pyx_t_2 = (__pyx_v_x <= 33.3);
}
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "fairseq/cps_dp.pyx":52
* return log1p(exp(x))
* elif 18. < x <= 33.3:
* return x + exp(-x) # <<<<<<<<<<<<<<
* else:
* return x
*/
__pyx_r = (__pyx_v_x + exp((-__pyx_v_x)));
goto __pyx_L0;
/* "fairseq/cps_dp.pyx":51
* elif -37. <= x <= 18.:
* return log1p(exp(x))
* elif 18. < x <= 33.3: # <<<<<<<<<<<<<<
* return x + exp(-x)
* else:
*/
}
/* "fairseq/cps_dp.pyx":54
* return x + exp(-x)
* else:
* return x # <<<<<<<<<<<<<<
*
* @cython.boundscheck(False)
*/
/*else*/ {
__pyx_r = __pyx_v_x;
goto __pyx_L0;
}
/* "fairseq/cps_dp.pyx":40
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef inline DTYPE_t log1pexp(DTYPE_t x) nogil: # <<<<<<<<<<<<<<
* """
* Numerically stable implementation of log(1+exp(x)), a.k.a. softplus (logaddexp(0, x)).
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
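/* A worked restatement of the four regimes above, assuming IEEE doubles
   (thresholds as in the Rmpfr note cited in the docstring):
       x <= -37        : exp(x)          log1p(exp(x)) equals exp(x) to
                                         double precision here
       -37 <  x <= 18  : log1p(exp(x))   the direct formula is safe
       18  <  x <= 33.3: x + exp(-x)     since log1p(e^x) = x + log1p(e^-x)
                                         and log1p(t) ~ t for tiny t
       33.3 < x        : x               exp(-x) falls below x's ulp */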
/* "fairseq/cps_dp.pyx":58
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef inline DTYPE_t log_add(DTYPE_t x, DTYPE_t y) nogil: # <<<<<<<<<<<<<<
* """
* Addition of 2 values in log space.
*/
static CYTHON_INLINE __pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_f_7fairseq_6cps_dp_log_add(__pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_v_x, __pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_v_y) {
__pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_r;
int __pyx_t_1;
/* "fairseq/cps_dp.pyx":63
* Need separate checks for inf because inf-inf=nan
* """
* if x == -INFINITY: # <<<<<<<<<<<<<<
* return y
* elif y == -INFINITY:
*/
__pyx_t_1 = ((__pyx_v_x == (-INFINITY)) != 0);
if (__pyx_t_1) {
/* "fairseq/cps_dp.pyx":64
* """
* if x == -INFINITY:
* return y # <<<<<<<<<<<<<<
* elif y == -INFINITY:
* return x
*/
__pyx_r = __pyx_v_y;
goto __pyx_L0;
/* "fairseq/cps_dp.pyx":63
* Need separate checks for inf because inf-inf=nan
* """
* if x == -INFINITY: # <<<<<<<<<<<<<<
* return y
* elif y == -INFINITY:
*/
}
/* "fairseq/cps_dp.pyx":65
* if x == -INFINITY:
* return y
* elif y == -INFINITY: # <<<<<<<<<<<<<<
* return x
* else:
*/
__pyx_t_1 = ((__pyx_v_y == (-INFINITY)) != 0);
if (__pyx_t_1) {
/* "fairseq/cps_dp.pyx":66
* return y
* elif y == -INFINITY:
* return x # <<<<<<<<<<<<<<
* else:
* if y <= x:
*/
__pyx_r = __pyx_v_x;
goto __pyx_L0;
/* "fairseq/cps_dp.pyx":65
* if x == -INFINITY:
* return y
* elif y == -INFINITY: # <<<<<<<<<<<<<<
* return x
* else:
*/
}
/* "fairseq/cps_dp.pyx":68
* return x
* else:
* if y <= x: # <<<<<<<<<<<<<<
* return x + log1pexp(y - x)
* else:
*/
/*else*/ {
__pyx_t_1 = ((__pyx_v_y <= __pyx_v_x) != 0);
if (__pyx_t_1) {
/* "fairseq/cps_dp.pyx":69
* else:
* if y <= x:
* return x + log1pexp(y - x) # <<<<<<<<<<<<<<
* else:
* return y + log1pexp(x - y)
*/
__pyx_r = (__pyx_v_x + __pyx_f_7fairseq_6cps_dp_log1pexp((__pyx_v_y - __pyx_v_x)));
goto __pyx_L0;
/* "fairseq/cps_dp.pyx":68
* return x
* else:
* if y <= x: # <<<<<<<<<<<<<<
* return x + log1pexp(y - x)
* else:
*/
}
/* "fairseq/cps_dp.pyx":71
* return x + log1pexp(y - x)
* else:
* return y + log1pexp(x - y) # <<<<<<<<<<<<<<
*
* @cython.boundscheck(False)
*/
/*else*/ {
__pyx_r = (__pyx_v_y + __pyx_f_7fairseq_6cps_dp_log1pexp((__pyx_v_x - __pyx_v_y)));
goto __pyx_L0;
}
}
/* "fairseq/cps_dp.pyx":58
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef inline DTYPE_t log_add(DTYPE_t x, DTYPE_t y) nogil: # <<<<<<<<<<<<<<
* """
* Addition of 2 values in log space.
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
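/* A standalone sketch of the same logaddexp computation (assuming IEEE
   doubles; ref_log_add is not a Cython name). It mirrors the identity used
   above: log(e^x + e^y) = max(x, y) + log1p(exp(min - max)), with the
   -INFINITY cases split off first because inf - inf would produce NaN.
   Kept under "#if 0" so it never affects the generated build. */
#if 0
#include <math.h>
static double ref_log_add(double x, double y)
{
    if (x == -INFINITY) return y;          /* exp(x) == 0, result is y */
    if (y == -INFINITY) return x;          /* exp(y) == 0, result is x */
    double hi = (x >= y) ? x : y;
    double lo = (x >= y) ? y : x;
    return hi + log1p(exp(lo - hi));       /* hi + log1pexp(lo - hi)   */
}
#endif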
/* "fairseq/cps_dp.pyx":75
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def calc_normalization(np.ndarray[DTYPE_t, ndim=1] logp_sliced, int k): # <<<<<<<<<<<<<<
* """
* This function calculates the normalization factor in CPS which is
*/
/* Python wrapper */
static PyObject *__pyx_pw_7fairseq_6cps_dp_1calc_normalization(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_7fairseq_6cps_dp_calc_normalization[] = "\n This function calculates the normalization factor in CPS, which is the\n sum, over all subsets of size k, of the product of their weights\n @param logp_sliced: weights of candidates in log space\n @param k: sample size\n @return: dp matrix containing all normalization factors\n ";
static PyMethodDef __pyx_mdef_7fairseq_6cps_dp_1calc_normalization = {"calc_normalization", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7fairseq_6cps_dp_1calc_normalization, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7fairseq_6cps_dp_calc_normalization};
static PyObject *__pyx_pw_7fairseq_6cps_dp_1calc_normalization(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_logp_sliced = 0;
int __pyx_v_k;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("calc_normalization (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_logp_sliced,&__pyx_n_s_k,0};
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_logp_sliced)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_k)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("calc_normalization", 1, 2, 2, 1); __PYX_ERR(0, 75, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "calc_normalization") < 0)) __PYX_ERR(0, 75, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_logp_sliced = ((PyArrayObject *)values[0]);
__pyx_v_k = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_k == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 75, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("calc_normalization", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 75, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("fairseq.cps_dp.calc_normalization", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_logp_sliced), __pyx_ptype_5numpy_ndarray, 1, "logp_sliced", 0))) __PYX_ERR(0, 75, __pyx_L1_error)
__pyx_r = __pyx_pf_7fairseq_6cps_dp_calc_normalization(__pyx_self, __pyx_v_logp_sliced, __pyx_v_k);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_7fairseq_6cps_dp_calc_normalization(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_logp_sliced, int __pyx_v_k) {
int __pyx_v_n;
PyArrayObject *__pyx_v_subset_sum_product_probs = 0;
float __pyx_v_intermediate_res;
int __pyx_v_r;
int __pyx_v_i;
__Pyx_LocalBuf_ND __pyx_pybuffernd_logp_sliced;
__Pyx_Buffer __pyx_pybuffer_logp_sliced;
__Pyx_LocalBuf_ND __pyx_pybuffernd_subset_sum_product_probs;
__Pyx_Buffer __pyx_pybuffer_subset_sum_product_probs;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyArrayObject *__pyx_t_7 = NULL;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
long __pyx_t_12;
long __pyx_t_13;
long __pyx_t_14;
long __pyx_t_15;
long __pyx_t_16;
Py_ssize_t __pyx_t_17;
Py_ssize_t __pyx_t_18;
Py_ssize_t __pyx_t_19;
Py_ssize_t __pyx_t_20;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("calc_normalization", 0);
__pyx_pybuffer_subset_sum_product_probs.pybuffer.buf = NULL;
__pyx_pybuffer_subset_sum_product_probs.refcount = 0;
__pyx_pybuffernd_subset_sum_product_probs.data = NULL;
__pyx_pybuffernd_subset_sum_product_probs.rcbuffer = &__pyx_pybuffer_subset_sum_product_probs;
__pyx_pybuffer_logp_sliced.pybuffer.buf = NULL;
__pyx_pybuffer_logp_sliced.refcount = 0;
__pyx_pybuffernd_logp_sliced.data = NULL;
__pyx_pybuffernd_logp_sliced.rcbuffer = &__pyx_pybuffer_logp_sliced;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_logp_sliced.rcbuffer->pybuffer, (PyObject*)__pyx_v_logp_sliced, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 75, __pyx_L1_error)
}
__pyx_pybuffernd_logp_sliced.diminfo[0].strides = __pyx_pybuffernd_logp_sliced.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_logp_sliced.diminfo[0].shape = __pyx_pybuffernd_logp_sliced.rcbuffer->pybuffer.shape[0];
/* "fairseq/cps_dp.pyx":83
* @return: dp matrix containing all normalization factors
* """
* cdef int n = len(logp_sliced) # <<<<<<<<<<<<<<
* cdef np.ndarray[DTYPE_t, ndim=2] subset_sum_product_probs
*
*/
__pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_logp_sliced)); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 83, __pyx_L1_error)
__pyx_v_n = __pyx_t_1;
/* "fairseq/cps_dp.pyx":86
* cdef np.ndarray[DTYPE_t, ndim=2] subset_sum_product_probs
*
* subset_sum_product_probs = np.full((k + 1, n + 1), -np.inf, dtype=np.float64) # <<<<<<<<<<<<<<
* subset_sum_product_probs[0, :] = 0.
* cdef float intermediate_res
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_full); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyInt_From_long((__pyx_v_k + 1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyInt_From_long((__pyx_v_n + 1)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_4 = 0;
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_inf); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyNumber_Negative(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__pyx_t_5 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_float64); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 86, __pyx_L1_error)
__pyx_t_7 = ((PyArrayObject *)__pyx_t_6);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer);
__pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_8 < 0)) {
PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer, (PyObject*)__pyx_v_subset_sum_product_probs, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11);
}
__pyx_t_9 = __pyx_t_10 = __pyx_t_11 = 0;
}
__pyx_pybuffernd_subset_sum_product_probs.diminfo[0].strides = __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_subset_sum_product_probs.diminfo[0].shape = __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_subset_sum_product_probs.diminfo[1].strides = __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_subset_sum_product_probs.diminfo[1].shape = __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_8 < 0)) __PYX_ERR(0, 86, __pyx_L1_error)
}
__pyx_t_7 = 0;
__pyx_v_subset_sum_product_probs = ((PyArrayObject *)__pyx_t_6);
__pyx_t_6 = 0;
/* "fairseq/cps_dp.pyx":87
*
* subset_sum_product_probs = np.full((k + 1, n + 1), -np.inf, dtype=np.float64)
* subset_sum_product_probs[0, :] = 0. # <<<<<<<<<<<<<<
* cdef float intermediate_res
* cdef int r
*/
if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_subset_sum_product_probs), __pyx_tuple__2, __pyx_float_0_) < 0)) __PYX_ERR(0, 87, __pyx_L1_error)
/* "fairseq/cps_dp.pyx":92
* cdef int i
*
* for r in range(1, k + 1): # <<<<<<<<<<<<<<
* for i in prange(1, n + 1, nogil=True):
* intermediate_res = subset_sum_product_probs[r - 1, i - 1] + logp_sliced[i - 1]
*/
__pyx_t_12 = (__pyx_v_k + 1);
__pyx_t_13 = __pyx_t_12;
for (__pyx_t_8 = 1; __pyx_t_8 < __pyx_t_13; __pyx_t_8+=1) {
__pyx_v_r = __pyx_t_8;
/* "fairseq/cps_dp.pyx":93
*
* for r in range(1, k + 1):
* for i in prange(1, n + 1, nogil=True): # <<<<<<<<<<<<<<
* intermediate_res = subset_sum_product_probs[r - 1, i - 1] + logp_sliced[i - 1]
* subset_sum_product_probs[r, i] = log_add(subset_sum_product_probs[r, i - 1], intermediate_res)
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_14 = (__pyx_v_n + 1);
if ((1 == 0)) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_16 = (__pyx_t_14 - 1 + 1 - 1/abs(1)) / 1;
if (__pyx_t_16 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_intermediate_res)
#endif /* _OPENMP */
for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_16; __pyx_t_15++){
{
__pyx_v_i = (int)(1 + 1 * __pyx_t_15);
/* Initialize private variables to invalid values */
__pyx_v_intermediate_res = ((float)__PYX_NAN());
/* "fairseq/cps_dp.pyx":94
* for r in range(1, k + 1):
* for i in prange(1, n + 1, nogil=True):
* intermediate_res = subset_sum_product_probs[r - 1, i - 1] + logp_sliced[i - 1] # <<<<<<<<<<<<<<
* subset_sum_product_probs[r, i] = log_add(subset_sum_product_probs[r, i - 1], intermediate_res)
* return subset_sum_product_probs
*/
__pyx_t_17 = (__pyx_v_r - 1);
__pyx_t_18 = (__pyx_v_i - 1);
__pyx_t_19 = (__pyx_v_i - 1);
__pyx_v_intermediate_res = ((*__Pyx_BufPtrStrided2d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_subset_sum_product_probs.diminfo[0].strides, __pyx_t_18, __pyx_pybuffernd_subset_sum_product_probs.diminfo[1].strides)) + (*__Pyx_BufPtrStrided1d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_logp_sliced.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_logp_sliced.diminfo[0].strides)));
/* "fairseq/cps_dp.pyx":95
* for i in prange(1, n + 1, nogil=True):
* intermediate_res = subset_sum_product_probs[r - 1, i - 1] + logp_sliced[i - 1]
* subset_sum_product_probs[r, i] = log_add(subset_sum_product_probs[r, i - 1], intermediate_res) # <<<<<<<<<<<<<<
* return subset_sum_product_probs
*
*/
__pyx_t_19 = __pyx_v_r;
__pyx_t_18 = (__pyx_v_i - 1);
__pyx_t_17 = __pyx_v_r;
__pyx_t_20 = __pyx_v_i;
*__Pyx_BufPtrStrided2d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_subset_sum_product_probs.diminfo[0].strides, __pyx_t_20, __pyx_pybuffernd_subset_sum_product_probs.diminfo[1].strides) = __pyx_f_7fairseq_6cps_dp_log_add((*__Pyx_BufPtrStrided2d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_subset_sum_product_probs.diminfo[0].strides, __pyx_t_18, __pyx_pybuffernd_subset_sum_product_probs.diminfo[1].strides)), __pyx_v_intermediate_res);
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "fairseq/cps_dp.pyx":93
*
* for r in range(1, k + 1):
* for i in prange(1, n + 1, nogil=True): # <<<<<<<<<<<<<<
* intermediate_res = subset_sum_product_probs[r - 1, i - 1] + logp_sliced[i - 1]
* subset_sum_product_probs[r, i] = log_add(subset_sum_product_probs[r, i - 1], intermediate_res)
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L9;
}
__pyx_L9:;
}
}
}
/* "fairseq/cps_dp.pyx":96
* intermediate_res = subset_sum_product_probs[r - 1, i - 1] + logp_sliced[i - 1]
* subset_sum_product_probs[r, i] = log_add(subset_sum_product_probs[r, i - 1], intermediate_res)
* return subset_sum_product_probs # <<<<<<<<<<<<<<
*
* @cython.boundscheck(False)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_subset_sum_product_probs));
__pyx_r = ((PyObject *)__pyx_v_subset_sum_product_probs);
goto __pyx_L0;
/* "fairseq/cps_dp.pyx":75
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def calc_normalization(np.ndarray[DTYPE_t, ndim=1] logp_sliced, int k): # <<<<<<<<<<<<<<
* """
* This function calculates the normalization factor in CPS which is
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_logp_sliced.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("fairseq.cps_dp.calc_normalization", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_logp_sliced.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_subset_sum_product_probs);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
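/* A serial reference sketch of the DP that the parallel code above
   implements (assuming IEEE doubles; the names are illustrative, not
   Cython's). With Z a (k+1) x (n+1) row-major matrix, the recurrence in
   log space is
       Z[0][i] = 0                                    (log of the empty product)
       Z[r][i] = logaddexp(Z[r][i-1],                 item i excluded
                           Z[r-1][i-1] + logp[i-1])   item i included
   so Z[r][i] is the log of the sum, over all size-r subsets of the first
   i items, of the product of their weights. Note that the .pyx declares
   intermediate_res as a single-precision float even though the arrays are
   float64; the sketch keeps double precision throughout. Kept under
   "#if 0" so it never affects the generated build. */
#if 0
#include <math.h>
static double ref_logaddexp(double x, double y)
{
    double hi = (x >= y) ? x : y, lo = (x >= y) ? y : x;
    return (lo == -INFINITY) ? hi : hi + log1p(exp(lo - hi));
}
static void ref_calc_normalization(const double *logp, int n, int k, double *Z)
{
    /* Z must hold (k+1) * (n+1) doubles, pre-filled with -INFINITY. */
    for (int i = 0; i <= n; i++) Z[i] = 0.0;  /* row 0: log 1 */
    for (int r = 1; r <= k; r++)
        for (int i = 1; i <= n; i++)
            Z[r * (n + 1) + i] = ref_logaddexp(
                Z[r * (n + 1) + (i - 1)],                     /* exclude item i */
                Z[(r - 1) * (n + 1) + (i - 1)] + logp[i - 1]  /* include item i */);
}
#endif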
/* "fairseq/cps_dp.pyx":100
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def calc_log_inclusion_probs(np.ndarray[DTYPE_t, ndim=1] logp_sliced, # <<<<<<<<<<<<<<
* np.ndarray[DTYPE_t, ndim=2] subset_sum_product_probs, int k):
* """
*/
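/* For CPS, the inclusion probability of item i is the normalized sum, over
   all size-k subsets that contain i, of the product of their weights. The
   function below assembles it in log space from the forward DP produced by
   calc_normalization together with a complementary DP over the remaining
   suffix (remaining_subsetsum_product_probs). */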
/* Python wrapper */
static PyObject *__pyx_pw_7fairseq_6cps_dp_3calc_log_inclusion_probs(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_7fairseq_6cps_dp_2calc_log_inclusion_probs[] = "\n This function calculates the inclusion probability for the CPS design;\n it operates in log space\n @param logp_sliced: weights of the candidates, which can be probabilities or odds\n @param subset_sum_product_probs: normalization factors\n @param k: sample size\n @return: log inclusion probabilities\n ";
static PyMethodDef __pyx_mdef_7fairseq_6cps_dp_3calc_log_inclusion_probs = {"calc_log_inclusion_probs", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7fairseq_6cps_dp_3calc_log_inclusion_probs, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7fairseq_6cps_dp_2calc_log_inclusion_probs};
static PyObject *__pyx_pw_7fairseq_6cps_dp_3calc_log_inclusion_probs(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_logp_sliced = 0;
PyArrayObject *__pyx_v_subset_sum_product_probs = 0;
int __pyx_v_k;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("calc_log_inclusion_probs (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_logp_sliced,&__pyx_n_s_subset_sum_product_probs,&__pyx_n_s_k,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_logp_sliced)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_subset_sum_product_probs)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("calc_log_inclusion_probs", 1, 3, 3, 1); __PYX_ERR(0, 100, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_k)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("calc_log_inclusion_probs", 1, 3, 3, 2); __PYX_ERR(0, 100, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "calc_log_inclusion_probs") < 0)) __PYX_ERR(0, 100, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v_logp_sliced = ((PyArrayObject *)values[0]);
__pyx_v_subset_sum_product_probs = ((PyArrayObject *)values[1]);
__pyx_v_k = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_k == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 101, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("calc_log_inclusion_probs", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 100, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("fairseq.cps_dp.calc_log_inclusion_probs", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_logp_sliced), __pyx_ptype_5numpy_ndarray, 1, "logp_sliced", 0))) __PYX_ERR(0, 100, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_subset_sum_product_probs), __pyx_ptype_5numpy_ndarray, 1, "subset_sum_product_probs", 0))) __PYX_ERR(0, 101, __pyx_L1_error)
__pyx_r = __pyx_pf_7fairseq_6cps_dp_2calc_log_inclusion_probs(__pyx_self, __pyx_v_logp_sliced, __pyx_v_subset_sum_product_probs, __pyx_v_k);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_7fairseq_6cps_dp_2calc_log_inclusion_probs(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_logp_sliced, PyArrayObject *__pyx_v_subset_sum_product_probs, int __pyx_v_k) {
int __pyx_v_n;
PyArrayObject *__pyx_v_dp = 0;
PyArrayObject *__pyx_v_log_inclusion_probs = 0;
PyArrayObject *__pyx_v_remaining_subsetsum_product_probs = 0;
int __pyx_v_r;
int __pyx_v_i;
__Pyx_LocalBuf_ND __pyx_pybuffernd_dp;
__Pyx_Buffer __pyx_pybuffer_dp;
__Pyx_LocalBuf_ND __pyx_pybuffernd_log_inclusion_probs;
__Pyx_Buffer __pyx_pybuffer_log_inclusion_probs;
__Pyx_LocalBuf_ND __pyx_pybuffernd_logp_sliced;
__Pyx_Buffer __pyx_pybuffer_logp_sliced;
__Pyx_LocalBuf_ND __pyx_pybuffernd_remaining_subsetsum_product_probs;
__Pyx_Buffer __pyx_pybuffer_remaining_subsetsum_product_probs;
__Pyx_LocalBuf_ND __pyx_pybuffernd_subset_sum_product_probs;
__Pyx_Buffer __pyx_pybuffer_subset_sum_product_probs;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyArrayObject *__pyx_t_7 = NULL;
PyArrayObject *__pyx_t_8 = NULL;
int __pyx_t_9;
int __pyx_t_10;
long __pyx_t_11;
long __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
Py_ssize_t __pyx_t_18;
Py_ssize_t __pyx_t_19;
PyArrayObject *__pyx_t_20 = NULL;
PyObject *__pyx_t_21 = NULL;
PyObject *__pyx_t_22 = NULL;
PyObject *__pyx_t_23 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("calc_log_inclusion_probs", 0);
__pyx_pybuffer_dp.pybuffer.buf = NULL;
__pyx_pybuffer_dp.refcount = 0;
__pyx_pybuffernd_dp.data = NULL;
__pyx_pybuffernd_dp.rcbuffer = &__pyx_pybuffer_dp;
__pyx_pybuffer_log_inclusion_probs.pybuffer.buf = NULL;
__pyx_pybuffer_log_inclusion_probs.refcount = 0;
__pyx_pybuffernd_log_inclusion_probs.data = NULL;
__pyx_pybuffernd_log_inclusion_probs.rcbuffer = &__pyx_pybuffer_log_inclusion_probs;
__pyx_pybuffer_remaining_subsetsum_product_probs.pybuffer.buf = NULL;
__pyx_pybuffer_remaining_subsetsum_product_probs.refcount = 0;
__pyx_pybuffernd_remaining_subsetsum_product_probs.data = NULL;
__pyx_pybuffernd_remaining_subsetsum_product_probs.rcbuffer = &__pyx_pybuffer_remaining_subsetsum_product_probs;
__pyx_pybuffer_logp_sliced.pybuffer.buf = NULL;
__pyx_pybuffer_logp_sliced.refcount = 0;
__pyx_pybuffernd_logp_sliced.data = NULL;
__pyx_pybuffernd_logp_sliced.rcbuffer = &__pyx_pybuffer_logp_sliced;
__pyx_pybuffer_subset_sum_product_probs.pybuffer.buf = NULL;
__pyx_pybuffer_subset_sum_product_probs.refcount = 0;
__pyx_pybuffernd_subset_sum_product_probs.data = NULL;
__pyx_pybuffernd_subset_sum_product_probs.rcbuffer = &__pyx_pybuffer_subset_sum_product_probs;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_logp_sliced.rcbuffer->pybuffer, (PyObject*)__pyx_v_logp_sliced, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 100, __pyx_L1_error)
}
__pyx_pybuffernd_logp_sliced.diminfo[0].strides = __pyx_pybuffernd_logp_sliced.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_logp_sliced.diminfo[0].shape = __pyx_pybuffernd_logp_sliced.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer, (PyObject*)__pyx_v_subset_sum_product_probs, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 100, __pyx_L1_error)
}
__pyx_pybuffernd_subset_sum_product_probs.diminfo[0].strides = __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_subset_sum_product_probs.diminfo[0].shape = __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_subset_sum_product_probs.diminfo[1].strides = __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_subset_sum_product_probs.diminfo[1].shape = __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.shape[1];
/* "fairseq/cps_dp.pyx":110
* @return: log inclusion probabilities
* """
* cdef int n = len(logp_sliced) # <<<<<<<<<<<<<<
* cdef np.ndarray[DTYPE_t, ndim=1] dp = np.full(n, -np.inf, dtype=np.float64)
* cdef np.ndarray[DTYPE_t, ndim=1] log_inclusion_probs
*/
__pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_logp_sliced)); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 110, __pyx_L1_error)
__pyx_v_n = __pyx_t_1;
/* "fairseq/cps_dp.pyx":111
* """
* cdef int n = len(logp_sliced)
* cdef np.ndarray[DTYPE_t, ndim=1] dp = np.full(n, -np.inf, dtype=np.float64) # <<<<<<<<<<<<<<
* cdef np.ndarray[DTYPE_t, ndim=1] log_inclusion_probs
*
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_full); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_n); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_inf); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyNumber_Negative(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_float64); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 111, __pyx_L1_error)
__pyx_t_7 = ((PyArrayObject *)__pyx_t_6);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_dp.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
__pyx_v_dp = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_dp.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 111, __pyx_L1_error)
} else {__pyx_pybuffernd_dp.diminfo[0].strides = __pyx_pybuffernd_dp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_dp.diminfo[0].shape = __pyx_pybuffernd_dp.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_7 = 0;
__pyx_v_dp = ((PyArrayObject *)__pyx_t_6);
__pyx_t_6 = 0;
/* "fairseq/cps_dp.pyx":114
* cdef np.ndarray[DTYPE_t, ndim=1] log_inclusion_probs
*
* cdef np.ndarray[DTYPE_t, ndim=2] remaining_subsetsum_product_probs = np.full((k + 2, n + 2), -np.inf, # <<<<<<<<<<<<<<
* dtype=np.float64)
* remaining_subsetsum_product_probs[k, :] = 0.
*/
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 114, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_full); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 114, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyInt_From_long((__pyx_v_k + 2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 114, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = __Pyx_PyInt_From_long((__pyx_v_n + 2)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 114, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 114, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5);
__pyx_t_6 = 0;
__pyx_t_5 = 0;
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 114, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_inf); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 114, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = PyNumber_Negative(__pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 114, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 114, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5);
__pyx_t_3 = 0;
__pyx_t_5 = 0;
/* "fairseq/cps_dp.pyx":115
*
* cdef np.ndarray[DTYPE_t, ndim=2] remaining_subsetsum_product_probs = np.full((k + 2, n + 2), -np.inf,
* dtype=np.float64) # <<<<<<<<<<<<<<
* remaining_subsetsum_product_probs[k, :] = 0.
*
*/
__pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 115, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 115, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_float64); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 115, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) __PYX_ERR(0, 115, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "fairseq/cps_dp.pyx":114
* cdef np.ndarray[DTYPE_t, ndim=1] log_inclusion_probs
*
* cdef np.ndarray[DTYPE_t, ndim=2] remaining_subsetsum_product_probs = np.full((k + 2, n + 2), -np.inf, # <<<<<<<<<<<<<<
* dtype=np.float64)
* remaining_subsetsum_product_probs[k, :] = 0.
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 114, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 114, __pyx_L1_error)
__pyx_t_8 = ((PyArrayObject *)__pyx_t_2);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_remaining_subsetsum_product_probs.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
__pyx_v_remaining_subsetsum_product_probs = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_remaining_subsetsum_product_probs.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 114, __pyx_L1_error)
} else {__pyx_pybuffernd_remaining_subsetsum_product_probs.diminfo[0].strides = __pyx_pybuffernd_remaining_subsetsum_product_probs.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_remaining_subsetsum_product_probs.diminfo[0].shape = __pyx_pybuffernd_remaining_subsetsum_product_probs.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_remaining_subsetsum_product_probs.diminfo[1].strides = __pyx_pybuffernd_remaining_subsetsum_product_probs.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_remaining_subsetsum_product_probs.diminfo[1].shape = __pyx_pybuffernd_remaining_subsetsum_product_probs.rcbuffer->pybuffer.shape[1];
}
}
__pyx_t_8 = 0;
__pyx_v_remaining_subsetsum_product_probs = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
/* "fairseq/cps_dp.pyx":116
* cdef np.ndarray[DTYPE_t, ndim=2] remaining_subsetsum_product_probs = np.full((k + 2, n + 2), -np.inf,
* dtype=np.float64)
* remaining_subsetsum_product_probs[k, :] = 0. # <<<<<<<<<<<<<<
*
* cdef int r
*/
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_k); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_INCREF(__pyx_slice_);
__Pyx_GIVEREF(__pyx_slice_);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_slice_);
__pyx_t_2 = 0;
if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_remaining_subsetsum_product_probs), __pyx_t_5, __pyx_float_0_) < 0)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
/* "fairseq/cps_dp.pyx":120
* cdef int r
* cdef int i
* for r in range(k, 0, -1): # <<<<<<<<<<<<<<
* for i in prange(n, 0, -1, nogil=True):
* dp[i - 1] = log_add(dp[i - 1],
*/
for (__pyx_t_9 = __pyx_v_k; __pyx_t_9 > 0; __pyx_t_9-=1) {
__pyx_v_r = __pyx_t_9;
/* "fairseq/cps_dp.pyx":121
* cdef int i
* for r in range(k, 0, -1):
* for i in prange(n, 0, -1, nogil=True): # <<<<<<<<<<<<<<
* dp[i - 1] = log_add(dp[i - 1],
* subset_sum_product_probs[r - 1, i - 1] + remaining_subsetsum_product_probs[r, i + 1])
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_10 = __pyx_v_n;
if ((-1L == 0)) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_12 = (0 - __pyx_t_10 + -1L - -1L/abs(-1L)) / -1L;
if (__pyx_t_12 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i)
#endif /* _OPENMP */
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_12; __pyx_t_11++){
{
__pyx_v_i = (int)(__pyx_t_10 + -1L * __pyx_t_11);
/* "fairseq/cps_dp.pyx":122
* for r in range(k, 0, -1):
* for i in prange(n, 0, -1, nogil=True):
* dp[i - 1] = log_add(dp[i - 1], # <<<<<<<<<<<<<<
* subset_sum_product_probs[r - 1, i - 1] + remaining_subsetsum_product_probs[r, i + 1])
* remaining_subsetsum_product_probs[r, i] = log_add(
*/
__pyx_t_13 = (__pyx_v_i - 1);
/* "fairseq/cps_dp.pyx":123
* for i in prange(n, 0, -1, nogil=True):
* dp[i - 1] = log_add(dp[i - 1],
* subset_sum_product_probs[r - 1, i - 1] + remaining_subsetsum_product_probs[r, i + 1]) # <<<<<<<<<<<<<<
* remaining_subsetsum_product_probs[r, i] = log_add(
* remaining_subsetsum_product_probs[r + 1, i + 1] + logp_sliced[i - 1],
*/
__pyx_t_14 = (__pyx_v_r - 1);
__pyx_t_15 = (__pyx_v_i - 1);
__pyx_t_16 = __pyx_v_r;
__pyx_t_17 = (__pyx_v_i + 1);
/* "fairseq/cps_dp.pyx":122
* for r in range(k, 0, -1):
* for i in prange(n, 0, -1, nogil=True):
* dp[i - 1] = log_add(dp[i - 1], # <<<<<<<<<<<<<<
* subset_sum_product_probs[r - 1, i - 1] + remaining_subsetsum_product_probs[r, i + 1])
* remaining_subsetsum_product_probs[r, i] = log_add(
*/
__pyx_t_18 = (__pyx_v_i - 1);
*__Pyx_BufPtrStrided1d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_dp.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_dp.diminfo[0].strides) = __pyx_f_7fairseq_6cps_dp_log_add((*__Pyx_BufPtrStrided1d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_dp.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_dp.diminfo[0].strides)), ((*__Pyx_BufPtrStrided2d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_subset_sum_product_probs.diminfo[0].strides, __pyx_t_15, __pyx_pybuffernd_subset_sum_product_probs.diminfo[1].strides)) + (*__Pyx_BufPtrStrided2d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_remaining_subsetsum_product_probs.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_remaining_subsetsum_product_probs.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_remaining_subsetsum_product_probs.diminfo[1].strides))));
/* "fairseq/cps_dp.pyx":125
* subset_sum_product_probs[r - 1, i - 1] + remaining_subsetsum_product_probs[r, i + 1])
* remaining_subsetsum_product_probs[r, i] = log_add(
* remaining_subsetsum_product_probs[r + 1, i + 1] + logp_sliced[i - 1], # <<<<<<<<<<<<<<
* remaining_subsetsum_product_probs[r, i + 1])
*
*/
__pyx_t_17 = (__pyx_v_r + 1);
__pyx_t_16 = (__pyx_v_i + 1);
__pyx_t_15 = (__pyx_v_i - 1);
/* "fairseq/cps_dp.pyx":126
* remaining_subsetsum_product_probs[r, i] = log_add(
* remaining_subsetsum_product_probs[r + 1, i + 1] + logp_sliced[i - 1],
* remaining_subsetsum_product_probs[r, i + 1]) # <<<<<<<<<<<<<<
*
* log_inclusion_probs = logp_sliced + dp - subset_sum_product_probs[k, n]
*/
__pyx_t_14 = __pyx_v_r;
__pyx_t_13 = (__pyx_v_i + 1);
/* "fairseq/cps_dp.pyx":124
* dp[i - 1] = log_add(dp[i - 1],
* subset_sum_product_probs[r - 1, i - 1] + remaining_subsetsum_product_probs[r, i + 1])
* remaining_subsetsum_product_probs[r, i] = log_add( # <<<<<<<<<<<<<<
* remaining_subsetsum_product_probs[r + 1, i + 1] + logp_sliced[i - 1],
* remaining_subsetsum_product_probs[r, i + 1])
*/
__pyx_t_18 = __pyx_v_r;
__pyx_t_19 = __pyx_v_i;
*__Pyx_BufPtrStrided2d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_remaining_subsetsum_product_probs.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_remaining_subsetsum_product_probs.diminfo[0].strides, __pyx_t_19, __pyx_pybuffernd_remaining_subsetsum_product_probs.diminfo[1].strides) = __pyx_f_7fairseq_6cps_dp_log_add(((*__Pyx_BufPtrStrided2d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_remaining_subsetsum_product_probs.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_remaining_subsetsum_product_probs.diminfo[0].strides, __pyx_t_16, __pyx_pybuffernd_remaining_subsetsum_product_probs.diminfo[1].strides)) + (*__Pyx_BufPtrStrided1d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_logp_sliced.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_logp_sliced.diminfo[0].strides))), (*__Pyx_BufPtrStrided2d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_remaining_subsetsum_product_probs.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_remaining_subsetsum_product_probs.diminfo[0].strides, __pyx_t_13, __pyx_pybuffernd_remaining_subsetsum_product_probs.diminfo[1].strides)));
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "fairseq/cps_dp.pyx":121
* cdef int i
* for r in range(k, 0, -1):
* for i in prange(n, 0, -1, nogil=True): # <<<<<<<<<<<<<<
* dp[i - 1] = log_add(dp[i - 1],
* subset_sum_product_probs[r - 1, i - 1] + remaining_subsetsum_product_probs[r, i + 1])
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L9;
}
__pyx_L9:;
}
}
}
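/*
 * The block just closed is Cython's expansion of the nogil prange from pyx
 * lines 120-126: the GIL is released (Py_UNBLOCK_THREADS), the reversed
 * range is normalized into an ascending trip count (__pyx_t_12), and the
 * body runs under "#pragma omp parallel" / "#pragma omp for" when compiled
 * with OpenMP. Assembled from the source comments, the recurrence it
 * computes is roughly:
 *
 *     for r in range(k, 0, -1):
 *         for i in prange(n, 0, -1, nogil=True):
 *             dp[i - 1] = log_add(dp[i - 1],
 *                 subset_sum_product_probs[r - 1, i - 1]
 *                 + remaining_subsetsum_product_probs[r, i + 1])
 *             remaining_subsetsum_product_probs[r, i] = log_add(
 *                 remaining_subsetsum_product_probs[r + 1, i + 1] + logp_sliced[i - 1],
 *                 remaining_subsetsum_product_probs[r, i + 1])
 *
 * An observation, not a fix: within one prange pass the update of
 * remaining_subsetsum_product_probs[r, i] reads [r, i + 1], which the i + 1
 * iteration of the same parallel loop writes, so the parallel schedule
 * appears to carry a dependence across iterations.
 */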
/* "fairseq/cps_dp.pyx":128
* remaining_subsetsum_product_probs[r, i + 1])
*
* log_inclusion_probs = logp_sliced + dp - subset_sum_product_probs[k, n] # <<<<<<<<<<<<<<
* return log_inclusion_probs
*
*/
__pyx_t_5 = PyNumber_Add(((PyObject *)__pyx_v_logp_sliced), ((PyObject *)__pyx_v_dp)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 128, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_13 = __pyx_v_k;
__pyx_t_14 = __pyx_v_n;
__pyx_t_2 = PyFloat_FromDouble((*__Pyx_BufPtrStrided2d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_subset_sum_product_probs.diminfo[0].strides, __pyx_t_14, __pyx_pybuffernd_subset_sum_product_probs.diminfo[1].strides))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 128, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_6 = PyNumber_Subtract(__pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 128, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 128, __pyx_L1_error)
__pyx_t_20 = ((PyArrayObject *)__pyx_t_6);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer);
__pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer, (PyObject*)__pyx_t_20, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_9 < 0)) {
PyErr_Fetch(&__pyx_t_21, &__pyx_t_22, &__pyx_t_23);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer, (PyObject*)__pyx_v_log_inclusion_probs, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_21); Py_XDECREF(__pyx_t_22); Py_XDECREF(__pyx_t_23);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_21, __pyx_t_22, __pyx_t_23);
}
__pyx_t_21 = __pyx_t_22 = __pyx_t_23 = 0;
}
__pyx_pybuffernd_log_inclusion_probs.diminfo[0].strides = __pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_log_inclusion_probs.diminfo[0].shape = __pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 128, __pyx_L1_error)
}
__pyx_t_20 = 0;
__pyx_v_log_inclusion_probs = ((PyArrayObject *)__pyx_t_6);
__pyx_t_6 = 0;
/* "fairseq/cps_dp.pyx":129
*
* log_inclusion_probs = logp_sliced + dp - subset_sum_product_probs[k, n]
* return log_inclusion_probs # <<<<<<<<<<<<<<
*
* @cython.boundscheck(False)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_log_inclusion_probs));
__pyx_r = ((PyObject *)__pyx_v_log_inclusion_probs);
goto __pyx_L0;
/* "fairseq/cps_dp.pyx":100
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def calc_log_inclusion_probs(np.ndarray[DTYPE_t, ndim=1] logp_sliced, # <<<<<<<<<<<<<<
* np.ndarray[DTYPE_t, ndim=2] subset_sum_product_probs, int k):
* """
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dp.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_logp_sliced.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_remaining_subsetsum_product_probs.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("fairseq.cps_dp.calc_log_inclusion_probs", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dp.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_logp_sliced.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_remaining_subsetsum_product_probs.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_dp);
__Pyx_XDECREF((PyObject *)__pyx_v_log_inclusion_probs);
__Pyx_XDECREF((PyObject *)__pyx_v_remaining_subsetsum_product_probs);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
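/*
 * Assembled from the pyx source comments embedded above, the function this
 * C code implements reads roughly as follows (a reconstruction, not the
 * verbatim fairseq source; the loop body is the recurrence sketched after
 * the prange expansion above):
 *
 *     @cython.boundscheck(False)
 *     @cython.wraparound(False)
 *     def calc_log_inclusion_probs(np.ndarray[DTYPE_t, ndim=1] logp_sliced,
 *                                  np.ndarray[DTYPE_t, ndim=2] subset_sum_product_probs,
 *                                  int k):
 *         cdef int n = len(logp_sliced)
 *         cdef np.ndarray[DTYPE_t, ndim=1] dp = np.full(n, -np.inf, dtype=np.float64)
 *         cdef np.ndarray[DTYPE_t, ndim=1] log_inclusion_probs
 *         cdef np.ndarray[DTYPE_t, ndim=2] remaining_subsetsum_product_probs = \
 *             np.full((k + 2, n + 2), -np.inf, dtype=np.float64)
 *         remaining_subsetsum_product_probs[k, :] = 0.
 *         cdef int r
 *         cdef int i
 *         for r in range(k, 0, -1):
 *             for i in prange(n, 0, -1, nogil=True):
 *                 ...  # log_add recurrence, see above
 *         log_inclusion_probs = logp_sliced + dp - subset_sum_product_probs[k, n]
 *         return log_inclusion_probs
 *
 * Interpreting the tables (an inference from the recurrences, not stated in
 * the source): subset_sum_product_probs[r, i] holds the log of the summed
 * weight products of size-r subsets of the first i items, and
 * remaining_subsetsum_product_probs[r, i] the analogous quantity for
 * subsets drawn from items i..n that complete a draw of size k. dp[i - 1]
 * then accumulates, over all split points r, the total weight of
 * size-(k - 1) subsets avoiding item i, so logp + dp minus the log
 * normalizer subset_sum_product_probs[k, n] yields each item's log
 * inclusion probability under conditional Poisson sampling.
 */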
/* "fairseq/cps_dp.pyx":133
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def sample(np.ndarray[DTYPE_t, ndim=1] logp, np.ndarray[DTYPE_int_t, ndim=1] selected_inds, int k): # <<<<<<<<<<<<<<
* """
* This function picks a sample of size k from candidates
*/
/* Python wrapper */
static PyObject *__pyx_pw_7fairseq_6cps_dp_5sample(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_7fairseq_6cps_dp_4sample[] = "\n This function picks a sample of size k from candidates\n @param logp: log probability of candidates\n @param selected_inds: selected candidates after nucleus filtering\n @param k: sample size\n @return: selected candidates indices and their inclusion probabilities\n ";
static PyMethodDef __pyx_mdef_7fairseq_6cps_dp_5sample = {"sample", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7fairseq_6cps_dp_5sample, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7fairseq_6cps_dp_4sample};
static PyObject *__pyx_pw_7fairseq_6cps_dp_5sample(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_logp = 0;
PyArrayObject *__pyx_v_selected_inds = 0;
int __pyx_v_k;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("sample (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_logp,&__pyx_n_s_selected_inds,&__pyx_n_s_k,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_logp)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_selected_inds)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("sample", 1, 3, 3, 1); __PYX_ERR(0, 133, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_k)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("sample", 1, 3, 3, 2); __PYX_ERR(0, 133, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "sample") < 0)) __PYX_ERR(0, 133, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v_logp = ((PyArrayObject *)values[0]);
__pyx_v_selected_inds = ((PyArrayObject *)values[1]);
__pyx_v_k = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_k == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 133, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("sample", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 133, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("fairseq.cps_dp.sample", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_logp), __pyx_ptype_5numpy_ndarray, 1, "logp", 0))) __PYX_ERR(0, 133, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_selected_inds), __pyx_ptype_5numpy_ndarray, 1, "selected_inds", 0))) __PYX_ERR(0, 133, __pyx_L1_error)
__pyx_r = __pyx_pf_7fairseq_6cps_dp_4sample(__pyx_self, __pyx_v_logp, __pyx_v_selected_inds, __pyx_v_k);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
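/*
 * As with calc_log_inclusion_probs above, this wrapper only unpacks and
 * type-checks (logp, selected_inds, k); the typed implementation of
 * sample() follows. From the embedded pyx comments it proceeds in four
 * stages: clip the log-probabilities and turn them into log-odds weights,
 * run calc_normalization to build the subset-sum-product DP table, compute
 * per-item log inclusion probabilities, then walk the candidates once,
 * accepting each against a precomputed log-uniform threshold until k items
 * are picked.
 */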
static PyObject *__pyx_pf_7fairseq_6cps_dp_4sample(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_logp, PyArrayObject *__pyx_v_selected_inds, int __pyx_v_k) {
long __pyx_v_n;
PyObject *__pyx_v_samples_idx = 0;
PyObject *__pyx_v_selected_incs = 0;
PyArrayObject *__pyx_v_thresholds = 0;
PyArrayObject *__pyx_v_log_weights = 0;
PyArrayObject *__pyx_v_log_prob_filtered = 0;
long __pyx_v_i;
PyArrayObject *__pyx_v_subset_sum_product_probs = 0;
__pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_v_thresh;
int __pyx_v_to_pick_number;
PyArrayObject *__pyx_v_log_inclusion_probs = 0;
__Pyx_LocalBuf_ND __pyx_pybuffernd_log_inclusion_probs;
__Pyx_Buffer __pyx_pybuffer_log_inclusion_probs;
__Pyx_LocalBuf_ND __pyx_pybuffernd_log_prob_filtered;
__Pyx_Buffer __pyx_pybuffer_log_prob_filtered;
__Pyx_LocalBuf_ND __pyx_pybuffernd_log_weights;
__Pyx_Buffer __pyx_pybuffer_log_weights;
__Pyx_LocalBuf_ND __pyx_pybuffernd_logp;
__Pyx_Buffer __pyx_pybuffer_logp;
__Pyx_LocalBuf_ND __pyx_pybuffernd_selected_inds;
__Pyx_Buffer __pyx_pybuffer_selected_inds;
__Pyx_LocalBuf_ND __pyx_pybuffernd_subset_sum_product_probs;
__Pyx_Buffer __pyx_pybuffer_subset_sum_product_probs;
__Pyx_LocalBuf_ND __pyx_pybuffernd_thresholds;
__Pyx_Buffer __pyx_pybuffer_thresholds;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
long __pyx_t_3;
long __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyArrayObject *__pyx_t_10 = NULL;
PyArrayObject *__pyx_t_11 = NULL;
PyObject *__pyx_t_12 = NULL;
PyObject *__pyx_t_13 = NULL;
PyObject *__pyx_t_14 = NULL;
PyArrayObject *__pyx_t_15 = NULL;
PyArrayObject *__pyx_t_16 = NULL;
PyArrayObject *__pyx_t_17 = NULL;
Py_ssize_t __pyx_t_18;
Py_ssize_t __pyx_t_19;
Py_ssize_t __pyx_t_20;
Py_ssize_t __pyx_t_21;
Py_ssize_t __pyx_t_22;
int __pyx_t_23;
int __pyx_t_24;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("sample", 0);
__pyx_pybuffer_thresholds.pybuffer.buf = NULL;
__pyx_pybuffer_thresholds.refcount = 0;
__pyx_pybuffernd_thresholds.data = NULL;
__pyx_pybuffernd_thresholds.rcbuffer = &__pyx_pybuffer_thresholds;
__pyx_pybuffer_log_weights.pybuffer.buf = NULL;
__pyx_pybuffer_log_weights.refcount = 0;
__pyx_pybuffernd_log_weights.data = NULL;
__pyx_pybuffernd_log_weights.rcbuffer = &__pyx_pybuffer_log_weights;
__pyx_pybuffer_log_prob_filtered.pybuffer.buf = NULL;
__pyx_pybuffer_log_prob_filtered.refcount = 0;
__pyx_pybuffernd_log_prob_filtered.data = NULL;
__pyx_pybuffernd_log_prob_filtered.rcbuffer = &__pyx_pybuffer_log_prob_filtered;
__pyx_pybuffer_subset_sum_product_probs.pybuffer.buf = NULL;
__pyx_pybuffer_subset_sum_product_probs.refcount = 0;
__pyx_pybuffernd_subset_sum_product_probs.data = NULL;
__pyx_pybuffernd_subset_sum_product_probs.rcbuffer = &__pyx_pybuffer_subset_sum_product_probs;
__pyx_pybuffer_log_inclusion_probs.pybuffer.buf = NULL;
__pyx_pybuffer_log_inclusion_probs.refcount = 0;
__pyx_pybuffernd_log_inclusion_probs.data = NULL;
__pyx_pybuffernd_log_inclusion_probs.rcbuffer = &__pyx_pybuffer_log_inclusion_probs;
__pyx_pybuffer_logp.pybuffer.buf = NULL;
__pyx_pybuffer_logp.refcount = 0;
__pyx_pybuffernd_logp.data = NULL;
__pyx_pybuffernd_logp.rcbuffer = &__pyx_pybuffer_logp;
__pyx_pybuffer_selected_inds.pybuffer.buf = NULL;
__pyx_pybuffer_selected_inds.refcount = 0;
__pyx_pybuffernd_selected_inds.data = NULL;
__pyx_pybuffernd_selected_inds.rcbuffer = &__pyx_pybuffer_selected_inds;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_logp.rcbuffer->pybuffer, (PyObject*)__pyx_v_logp, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 133, __pyx_L1_error)
}
__pyx_pybuffernd_logp.diminfo[0].strides = __pyx_pybuffernd_logp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_logp.diminfo[0].shape = __pyx_pybuffernd_logp.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_selected_inds.rcbuffer->pybuffer, (PyObject*)__pyx_v_selected_inds, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_int_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 133, __pyx_L1_error)
}
__pyx_pybuffernd_selected_inds.diminfo[0].strides = __pyx_pybuffernd_selected_inds.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_selected_inds.diminfo[0].shape = __pyx_pybuffernd_selected_inds.rcbuffer->pybuffer.shape[0];
/* "fairseq/cps_dp.pyx":141
* @return: selected candidates indices and their inclusion probabilities
* """
* cdef long n = len(logp) # <<<<<<<<<<<<<<
* k = min(n, k)
*
*/
__pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_logp)); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 141, __pyx_L1_error)
__pyx_v_n = __pyx_t_1;
/* "fairseq/cps_dp.pyx":142
* """
* cdef long n = len(logp)
* k = min(n, k) # <<<<<<<<<<<<<<
*
* cdef list samples_idx = []
*/
__pyx_t_2 = __pyx_v_k;
__pyx_t_3 = __pyx_v_n;
if (((__pyx_t_2 < __pyx_t_3) != 0)) {
__pyx_t_4 = __pyx_t_2;
} else {
__pyx_t_4 = __pyx_t_3;
}
__pyx_v_k = __pyx_t_4;
/* "fairseq/cps_dp.pyx":144
* k = min(n, k)
*
* cdef list samples_idx = [] # <<<<<<<<<<<<<<
* cdef list selected_incs = []
* cdef np.ndarray[DTYPE_t, ndim=1] thresholds = np.log(np.random.uniform(size=n))
*/
__pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 144, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_samples_idx = ((PyObject*)__pyx_t_5);
__pyx_t_5 = 0;
/* "fairseq/cps_dp.pyx":145
*
* cdef list samples_idx = []
* cdef list selected_incs = [] # <<<<<<<<<<<<<<
* cdef np.ndarray[DTYPE_t, ndim=1] thresholds = np.log(np.random.uniform(size=n))
*
*/
__pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 145, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_selected_incs = ((PyObject*)__pyx_t_5);
__pyx_t_5 = 0;
/* "fairseq/cps_dp.pyx":146
* cdef list samples_idx = []
* cdef list selected_incs = []
* cdef np.ndarray[DTYPE_t, ndim=1] thresholds = np.log(np.random.uniform(size=n)) # <<<<<<<<<<<<<<
*
* cdef np.ndarray[DTYPE_t, ndim=1] log_weights # using odds approximation as weights
*/
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_log); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_random); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_uniform); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_8 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_9 = __Pyx_PyInt_From_long(__pyx_v_n); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
if (PyDict_SetItem(__pyx_t_8, __pyx_n_s_size, __pyx_t_9) < 0) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_empty_tuple, __pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_5 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_9) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_9);
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 146, __pyx_L1_error)
__pyx_t_10 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_thresholds.rcbuffer->pybuffer, (PyObject*)__pyx_t_10, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
__pyx_v_thresholds = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_thresholds.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 146, __pyx_L1_error)
} else {__pyx_pybuffernd_thresholds.diminfo[0].strides = __pyx_pybuffernd_thresholds.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_thresholds.diminfo[0].shape = __pyx_pybuffernd_thresholds.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_10 = 0;
__pyx_v_thresholds = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
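/*
 * thresholds = np.log(np.random.uniform(size=n)) precomputes one
 * log-uniform draw per candidate. Later, the test
 * "thresholds[i - 1] < thresh" accepts item i with probability exp(thresh)
 * (since P(log U < t) = exp(t) for t <= 0), i.e. a Bernoulli draw carried
 * out entirely in log space.
 */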
/* "fairseq/cps_dp.pyx":150
* cdef np.ndarray[DTYPE_t, ndim=1] log_weights # using odds approximation as weights
* cdef np.ndarray[DTYPE_t, ndim=1] log_prob_filtered
* log_prob_filtered = logp.copy() # <<<<<<<<<<<<<<
* log_prob_filtered[log_prob_filtered > 0.99] = 0.99 # clipping in order to prevent NAN generation
* log_weights = log_prob_filtered - np.array(list(map(log1mexp, log_prob_filtered)))
*/
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_logp), __pyx_n_s_copy); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 150, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_5 = (__pyx_t_9) ? __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_9) : __Pyx_PyObject_CallNoArg(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 150, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 150, __pyx_L1_error)
__pyx_t_11 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_prob_filtered.rcbuffer->pybuffer);
__pyx_t_2 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_log_prob_filtered.rcbuffer->pybuffer, (PyObject*)__pyx_t_11, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_2 < 0)) {
PyErr_Fetch(&__pyx_t_12, &__pyx_t_13, &__pyx_t_14);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_log_prob_filtered.rcbuffer->pybuffer, (PyObject*)__pyx_v_log_prob_filtered, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_14);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_12, __pyx_t_13, __pyx_t_14);
}
__pyx_t_12 = __pyx_t_13 = __pyx_t_14 = 0;
}
__pyx_pybuffernd_log_prob_filtered.diminfo[0].strides = __pyx_pybuffernd_log_prob_filtered.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_log_prob_filtered.diminfo[0].shape = __pyx_pybuffernd_log_prob_filtered.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 150, __pyx_L1_error)
}
__pyx_t_11 = 0;
__pyx_v_log_prob_filtered = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "fairseq/cps_dp.pyx":151
* cdef np.ndarray[DTYPE_t, ndim=1] log_prob_filtered
* log_prob_filtered = logp.copy()
* log_prob_filtered[log_prob_filtered > 0.99] = 0.99 # clipping in order to prevent NAN generation # <<<<<<<<<<<<<<
* log_weights = log_prob_filtered - np.array(list(map(log1mexp, log_prob_filtered)))
*
*/
__pyx_t_5 = PyObject_RichCompare(((PyObject *)__pyx_v_log_prob_filtered), __pyx_float_0_99, Py_GT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 151, __pyx_L1_error)
if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_log_prob_filtered), __pyx_t_5, __pyx_float_0_99) < 0)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
/* "fairseq/cps_dp.pyx":152
* log_prob_filtered = logp.copy()
* log_prob_filtered[log_prob_filtered > 0.99] = 0.99 # clipping in order to prevent NAN generation
* log_weights = log_prob_filtered - np.array(list(map(log1mexp, log_prob_filtered))) # <<<<<<<<<<<<<<
*
* cdef long i
*/
__Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_array); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = __Pyx_CFunc_DTYPE__t____DTYPE__t___to_py(__pyx_f_7fairseq_6cps_dp_log1mexp); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_7);
__Pyx_INCREF(((PyObject *)__pyx_v_log_prob_filtered));
__Pyx_GIVEREF(((PyObject *)__pyx_v_log_prob_filtered));
PyTuple_SET_ITEM(__pyx_t_8, 1, ((PyObject *)__pyx_v_log_prob_filtered));
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_map, __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_8 = PySequence_List(__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_9);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_9, function);
}
}
__pyx_t_5 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_9, __pyx_t_7, __pyx_t_8) : __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_8);
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = PyNumber_Subtract(((PyObject *)__pyx_v_log_prob_filtered), __pyx_t_5); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_9) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_9, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 152, __pyx_L1_error)
__pyx_t_15 = ((PyArrayObject *)__pyx_t_9);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_weights.rcbuffer->pybuffer);
__pyx_t_2 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_log_weights.rcbuffer->pybuffer, (PyObject*)__pyx_t_15, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_2 < 0)) {
PyErr_Fetch(&__pyx_t_14, &__pyx_t_13, &__pyx_t_12);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_log_weights.rcbuffer->pybuffer, (PyObject*)__pyx_v_log_weights, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_14); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_12);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_14, __pyx_t_13, __pyx_t_12);
}
__pyx_t_14 = __pyx_t_13 = __pyx_t_12 = 0;
}
__pyx_pybuffernd_log_weights.diminfo[0].strides = __pyx_pybuffernd_log_weights.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_log_weights.diminfo[0].shape = __pyx_pybuffernd_log_weights.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 152, __pyx_L1_error)
}
__pyx_t_15 = 0;
__pyx_v_log_weights = ((PyArrayObject *)__pyx_t_9);
__pyx_t_9 = 0;
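/*
 * From pyx lines 150-152: log_prob_filtered clips log-probabilities above
 * 0.99 ("clipping in order to prevent NAN generation"), and then
 *
 *     log_weights = log_prob_filtered - np.array(list(map(log1mexp, log_prob_filtered)))
 *
 * Assuming log1mexp(x) computes log(1 - exp(x)) (it is defined earlier in
 * this module and not shown here), log_weights is the log-odds
 * log(p / (1 - p)) of each candidate, which matches the source comment
 * "using odds approximation as weights".
 */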
/* "fairseq/cps_dp.pyx":159
* cdef int to_pick_number
* cdef np.ndarray[DTYPE_t, ndim=1] log_inclusion_probs
* to_pick_number = k # <<<<<<<<<<<<<<
* subset_sum_product_probs = calc_normalization(log_weights, k)
* log_inclusion_probs = calc_log_inclusion_probs(log_weights, subset_sum_product_probs, k)
*/
__pyx_v_to_pick_number = __pyx_v_k;
/* "fairseq/cps_dp.pyx":160
* cdef np.ndarray[DTYPE_t, ndim=1] log_inclusion_probs
* to_pick_number = k
* subset_sum_product_probs = calc_normalization(log_weights, k) # <<<<<<<<<<<<<<
* log_inclusion_probs = calc_log_inclusion_probs(log_weights, subset_sum_product_probs, k)
* for i in range(n, 0, -1):
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_calc_normalization); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_8 = __Pyx_PyInt_From_int(__pyx_v_k); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_7 = NULL;
__pyx_t_2 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_2 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, ((PyObject *)__pyx_v_log_weights), __pyx_t_8};
__pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_2, 2+__pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, ((PyObject *)__pyx_v_log_weights), __pyx_t_8};
__pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_2, 2+__pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
} else
#endif
{
__pyx_t_6 = PyTuple_New(2+__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_INCREF(((PyObject *)__pyx_v_log_weights));
__Pyx_GIVEREF(((PyObject *)__pyx_v_log_weights));
PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_2, ((PyObject *)__pyx_v_log_weights));
__Pyx_GIVEREF(__pyx_t_8);
PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_2, __pyx_t_8);
__pyx_t_8 = 0;
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_6, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_9) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_9, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 160, __pyx_L1_error)
__pyx_t_16 = ((PyArrayObject *)__pyx_t_9);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer);
__pyx_t_2 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer, (PyObject*)__pyx_t_16, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_2 < 0)) {
PyErr_Fetch(&__pyx_t_12, &__pyx_t_13, &__pyx_t_14);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer, (PyObject*)__pyx_v_subset_sum_product_probs, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_14);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_12, __pyx_t_13, __pyx_t_14);
}
__pyx_t_12 = __pyx_t_13 = __pyx_t_14 = 0;
}
__pyx_pybuffernd_subset_sum_product_probs.diminfo[0].strides = __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_subset_sum_product_probs.diminfo[0].shape = __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_subset_sum_product_probs.diminfo[1].strides = __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_subset_sum_product_probs.diminfo[1].shape = __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 160, __pyx_L1_error)
}
__pyx_t_16 = 0;
__pyx_v_subset_sum_product_probs = ((PyArrayObject *)__pyx_t_9);
__pyx_t_9 = 0;
/* "fairseq/cps_dp.pyx":161
* to_pick_number = k
* subset_sum_product_probs = calc_normalization(log_weights, k)
* log_inclusion_probs = calc_log_inclusion_probs(log_weights, subset_sum_product_probs, k) # <<<<<<<<<<<<<<
* for i in range(n, 0, -1):
* thresh = log_weights[i - 1] + subset_sum_product_probs[to_pick_number - 1, i - 1] - subset_sum_product_probs[
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_calc_log_inclusion_probs); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 161, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_k); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 161, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
__pyx_t_2 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_2 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[4] = {__pyx_t_8, ((PyObject *)__pyx_v_log_weights), ((PyObject *)__pyx_v_subset_sum_product_probs), __pyx_t_6};
__pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_2, 3+__pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 161, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[4] = {__pyx_t_8, ((PyObject *)__pyx_v_log_weights), ((PyObject *)__pyx_v_subset_sum_product_probs), __pyx_t_6};
__pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_2, 3+__pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 161, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_7 = PyTuple_New(3+__pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 161, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
if (__pyx_t_8) {
__Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_8); __pyx_t_8 = NULL;
}
__Pyx_INCREF(((PyObject *)__pyx_v_log_weights));
__Pyx_GIVEREF(((PyObject *)__pyx_v_log_weights));
PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_2, ((PyObject *)__pyx_v_log_weights));
__Pyx_INCREF(((PyObject *)__pyx_v_subset_sum_product_probs));
__Pyx_GIVEREF(((PyObject *)__pyx_v_subset_sum_product_probs));
PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_2, ((PyObject *)__pyx_v_subset_sum_product_probs));
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_7, 2+__pyx_t_2, __pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 161, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_9) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_9, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 161, __pyx_L1_error)
__pyx_t_17 = ((PyArrayObject *)__pyx_t_9);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer);
__pyx_t_2 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer, (PyObject*)__pyx_t_17, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_2 < 0)) {
PyErr_Fetch(&__pyx_t_14, &__pyx_t_13, &__pyx_t_12);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer, (PyObject*)__pyx_v_log_inclusion_probs, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_14); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_12);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_14, __pyx_t_13, __pyx_t_12);
}
__pyx_t_14 = __pyx_t_13 = __pyx_t_12 = 0;
}
__pyx_pybuffernd_log_inclusion_probs.diminfo[0].strides = __pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_log_inclusion_probs.diminfo[0].shape = __pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 161, __pyx_L1_error)
}
__pyx_t_17 = 0;
__pyx_v_log_inclusion_probs = ((PyArrayObject *)__pyx_t_9);
__pyx_t_9 = 0;
/* "fairseq/cps_dp.pyx":162
* subset_sum_product_probs = calc_normalization(log_weights, k)
* log_inclusion_probs = calc_log_inclusion_probs(log_weights, subset_sum_product_probs, k)
* for i in range(n, 0, -1): # <<<<<<<<<<<<<<
* thresh = log_weights[i - 1] + subset_sum_product_probs[to_pick_number - 1, i - 1] - subset_sum_product_probs[
* to_pick_number, i]
*/
for (__pyx_t_4 = __pyx_v_n; __pyx_t_4 > 0; __pyx_t_4-=1) {
__pyx_v_i = __pyx_t_4;
/* "fairseq/cps_dp.pyx":163
* log_inclusion_probs = calc_log_inclusion_probs(log_weights, subset_sum_product_probs, k)
* for i in range(n, 0, -1):
* thresh = log_weights[i - 1] + subset_sum_product_probs[to_pick_number - 1, i - 1] - subset_sum_product_probs[ # <<<<<<<<<<<<<<
* to_pick_number, i]
* if isnan(thresh):
*/
__pyx_t_18 = (__pyx_v_i - 1);
__pyx_t_19 = (__pyx_v_to_pick_number - 1);
__pyx_t_20 = (__pyx_v_i - 1);
/* "fairseq/cps_dp.pyx":164
* for i in range(n, 0, -1):
* thresh = log_weights[i - 1] + subset_sum_product_probs[to_pick_number - 1, i - 1] - subset_sum_product_probs[
* to_pick_number, i] # <<<<<<<<<<<<<<
* if isnan(thresh):
* thresh = 0
*/
__pyx_t_21 = __pyx_v_to_pick_number;
__pyx_t_22 = __pyx_v_i;
/* "fairseq/cps_dp.pyx":163
* log_inclusion_probs = calc_log_inclusion_probs(log_weights, subset_sum_product_probs, k)
* for i in range(n, 0, -1):
* thresh = log_weights[i - 1] + subset_sum_product_probs[to_pick_number - 1, i - 1] - subset_sum_product_probs[ # <<<<<<<<<<<<<<
* to_pick_number, i]
* if isnan(thresh):
*/
__pyx_v_thresh = (((*__Pyx_BufPtrStrided1d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_log_weights.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_log_weights.diminfo[0].strides)) + (*__Pyx_BufPtrStrided2d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_subset_sum_product_probs.diminfo[0].strides, __pyx_t_20, __pyx_pybuffernd_subset_sum_product_probs.diminfo[1].strides))) - (*__Pyx_BufPtrStrided2d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_subset_sum_product_probs.diminfo[0].strides, __pyx_t_22, __pyx_pybuffernd_subset_sum_product_probs.diminfo[1].strides)));
/* "fairseq/cps_dp.pyx":165
* thresh = log_weights[i - 1] + subset_sum_product_probs[to_pick_number - 1, i - 1] - subset_sum_product_probs[
* to_pick_number, i]
* if isnan(thresh): # <<<<<<<<<<<<<<
* thresh = 0
* if thresholds[i - 1] < thresh:
*/
__pyx_t_23 = (isnan(__pyx_v_thresh) != 0);
if (__pyx_t_23) {
/* "fairseq/cps_dp.pyx":166
* to_pick_number, i]
* if isnan(thresh):
* thresh = 0 # <<<<<<<<<<<<<<
* if thresholds[i - 1] < thresh:
* samples_idx.append(selected_inds[i - 1])
*/
__pyx_v_thresh = 0.0;
/* "fairseq/cps_dp.pyx":165
* thresh = log_weights[i - 1] + subset_sum_product_probs[to_pick_number - 1, i - 1] - subset_sum_product_probs[
* to_pick_number, i]
* if isnan(thresh): # <<<<<<<<<<<<<<
* thresh = 0
* if thresholds[i - 1] < thresh:
*/
}
/* "fairseq/cps_dp.pyx":167
* if isnan(thresh):
* thresh = 0
* if thresholds[i - 1] < thresh: # <<<<<<<<<<<<<<
* samples_idx.append(selected_inds[i - 1])
* selected_incs.append(log_inclusion_probs[i - 1])
*/
__pyx_t_22 = (__pyx_v_i - 1);
__pyx_t_23 = (((*__Pyx_BufPtrStrided1d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_thresholds.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_thresholds.diminfo[0].strides)) < __pyx_v_thresh) != 0);
if (__pyx_t_23) {
/* "fairseq/cps_dp.pyx":168
* thresh = 0
* if thresholds[i - 1] < thresh:
* samples_idx.append(selected_inds[i - 1]) # <<<<<<<<<<<<<<
* selected_incs.append(log_inclusion_probs[i - 1])
* to_pick_number -= 1
*/
__pyx_t_22 = (__pyx_v_i - 1);
__pyx_t_9 = __Pyx_PyInt_From_npy_int64((*__Pyx_BufPtrStrided1d(__pyx_t_7fairseq_6cps_dp_DTYPE_int_t *, __pyx_pybuffernd_selected_inds.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_selected_inds.diminfo[0].strides))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 168, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_24 = __Pyx_PyList_Append(__pyx_v_samples_idx, __pyx_t_9); if (unlikely(__pyx_t_24 == ((int)-1))) __PYX_ERR(0, 168, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "fairseq/cps_dp.pyx":169
* if thresholds[i - 1] < thresh:
* samples_idx.append(selected_inds[i - 1])
* selected_incs.append(log_inclusion_probs[i - 1]) # <<<<<<<<<<<<<<
* to_pick_number -= 1
* if to_pick_number == 0:
*/
__pyx_t_22 = (__pyx_v_i - 1);
__pyx_t_9 = PyFloat_FromDouble((*__Pyx_BufPtrStrided1d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_log_inclusion_probs.diminfo[0].strides))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 169, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_24 = __Pyx_PyList_Append(__pyx_v_selected_incs, __pyx_t_9); if (unlikely(__pyx_t_24 == ((int)-1))) __PYX_ERR(0, 169, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "fairseq/cps_dp.pyx":170
* samples_idx.append(selected_inds[i - 1])
* selected_incs.append(log_inclusion_probs[i - 1])
* to_pick_number -= 1 # <<<<<<<<<<<<<<
* if to_pick_number == 0:
* break
*/
__pyx_v_to_pick_number = (__pyx_v_to_pick_number - 1);
/* "fairseq/cps_dp.pyx":171
* selected_incs.append(log_inclusion_probs[i - 1])
* to_pick_number -= 1
* if to_pick_number == 0: # <<<<<<<<<<<<<<
* break
* return np.asarray(samples_idx), np.asarray(selected_incs)
*/
__pyx_t_23 = ((__pyx_v_to_pick_number == 0) != 0);
if (__pyx_t_23) {
/* "fairseq/cps_dp.pyx":172
* to_pick_number -= 1
* if to_pick_number == 0:
* break # <<<<<<<<<<<<<<
* return np.asarray(samples_idx), np.asarray(selected_incs)
*
*/
goto __pyx_L4_break;
/* "fairseq/cps_dp.pyx":171
* selected_incs.append(log_inclusion_probs[i - 1])
* to_pick_number -= 1
* if to_pick_number == 0: # <<<<<<<<<<<<<<
* break
* return np.asarray(samples_idx), np.asarray(selected_incs)
*/
}
/* "fairseq/cps_dp.pyx":167
* if isnan(thresh):
* thresh = 0
* if thresholds[i - 1] < thresh: # <<<<<<<<<<<<<<
* samples_idx.append(selected_inds[i - 1])
* selected_incs.append(log_inclusion_probs[i - 1])
*/
}
}
__pyx_L4_break:;
/* "fairseq/cps_dp.pyx":173
* if to_pick_number == 0:
* break
* return np.asarray(samples_idx), np.asarray(selected_incs) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 173, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_asarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 173, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_9 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_5, __pyx_v_samples_idx) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_v_samples_idx);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 173, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 173, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_asarray); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 173, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
}
}
__pyx_t_7 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_5, __pyx_v_selected_incs) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_v_selected_incs);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 173, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 173, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_9);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_7);
__pyx_t_9 = 0;
__pyx_t_7 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "fairseq/cps_dp.pyx":133
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def sample(np.ndarray[DTYPE_t, ndim=1] logp, np.ndarray[DTYPE_int_t, ndim=1] selected_inds, int k): # <<<<<<<<<<<<<<
* """
* This function picks a sample of size k from candidates
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_9);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_prob_filtered.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_weights.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_logp.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_selected_inds.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_thresholds.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("fairseq.cps_dp.sample", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_prob_filtered.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_weights.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_logp.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_selected_inds.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_thresholds.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF(__pyx_v_samples_idx);
__Pyx_XDECREF(__pyx_v_selected_incs);
__Pyx_XDECREF((PyObject *)__pyx_v_thresholds);
__Pyx_XDECREF((PyObject *)__pyx_v_log_weights);
__Pyx_XDECREF((PyObject *)__pyx_v_log_prob_filtered);
__Pyx_XDECREF((PyObject *)__pyx_v_subset_sum_product_probs);
__Pyx_XDECREF((PyObject *)__pyx_v_log_inclusion_probs);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
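/* Editor's sketch (hand-written, not Cython output): the accept/reject loop
 * above is the backward pass of conditional Poisson sampling. Reading the
 * quoted .pyx source, candidate i-1 is accepted, while to_pick_number items
 * remain to be chosen, iff
 *
 *   log(u) < log_weights[i-1]
 *            + subset_sum_product_probs[to_pick_number - 1, i - 1]
 *            - subset_sum_product_probs[to_pick_number, i]
 *
 * with u ~ Uniform(0,1). A minimal standalone restatement of that test
 * follows; the names, the row stride nplus1, and the use of rand() are
 * assumptions made for the sketch and are not part of the generated module. */
#if 0 /* illustration only; excluded from compilation */
#include <math.h>
#include <stdlib.h>

/* Accept candidate i-1 when t more items are still to be picked from the
 * first i candidates. ssp is subset_sum_product_probs flattened row-major
 * with row stride nplus1; a NaN threshold is treated as 0, as in the .pyx. */
static int sketch_accept(const double *log_weights, const double *ssp,
                         long i, int t, long nplus1)
{
    double thresh = log_weights[i - 1]
                  + ssp[(t - 1) * nplus1 + (i - 1)]
                  - ssp[t * nplus1 + i];
    /* (rand()+1)/(RAND_MAX+2) lies strictly inside (0,1), so log() is finite */
    double log_u = log((rand() + 1.0) / (RAND_MAX + 2.0));
    if (isnan(thresh)) thresh = 0.0;
    return log_u < thresh;
}
#endif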
/* "fairseq/cps_dp.pyx":178
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def sampford_sample(np.ndarray[DTYPE_t, ndim=1] logp, np.ndarray[DTYPE_int_t, ndim=1] selected_inds, int k): # <<<<<<<<<<<<<<
* cdef long n = len(logp)
* k = min(n, k)
*/
/* Python wrapper */
static PyObject *__pyx_pw_7fairseq_6cps_dp_7sampford_sample(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_7fairseq_6cps_dp_7sampford_sample = {"sampford_sample", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7fairseq_6cps_dp_7sampford_sample, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_7fairseq_6cps_dp_7sampford_sample(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_logp = 0;
PyArrayObject *__pyx_v_selected_inds = 0;
int __pyx_v_k;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("sampford_sample (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_logp,&__pyx_n_s_selected_inds,&__pyx_n_s_k,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_logp)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_selected_inds)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("sampford_sample", 1, 3, 3, 1); __PYX_ERR(0, 178, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_k)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("sampford_sample", 1, 3, 3, 2); __PYX_ERR(0, 178, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "sampford_sample") < 0)) __PYX_ERR(0, 178, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v_logp = ((PyArrayObject *)values[0]);
__pyx_v_selected_inds = ((PyArrayObject *)values[1]);
__pyx_v_k = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_k == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 178, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("sampford_sample", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 178, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("fairseq.cps_dp.sampford_sample", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_logp), __pyx_ptype_5numpy_ndarray, 1, "logp", 0))) __PYX_ERR(0, 178, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_selected_inds), __pyx_ptype_5numpy_ndarray, 1, "selected_inds", 0))) __PYX_ERR(0, 178, __pyx_L1_error)
__pyx_r = __pyx_pf_7fairseq_6cps_dp_6sampford_sample(__pyx_self, __pyx_v_logp, __pyx_v_selected_inds, __pyx_v_k);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
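/* Editor's sketch (hand-written, not Cython output): on .pyx line 188 below,
 * sampford_sample builds log_weights = logp - log1mexp(logp), i.e. the
 * log-odds log(p / (1 - p)) computed entirely in log space. A numerically
 * stable log1mexp(x) = log(1 - exp(x)) for x < 0 follows Maechler's recipe:
 * log(-expm1(x)) when x is near 0, log1p(-exp(x)) otherwise. The helper below
 * illustrates that identity; the module's actual cdef log1mexp may differ. */
#if 0 /* illustration only; excluded from compilation */
#include <math.h>
#ifndef M_LN2
#define M_LN2 0.693147180559945309417 /* ln 2 */
#endif

static double sketch_log1mexp(double x) /* requires x < 0 */
{
    /* switch branches at x = -ln 2, where exp(x) = 0.5 */
    return (x > -M_LN2) ? log(-expm1(x)) : log1p(-exp(x));
}
#endif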
static PyObject *__pyx_pf_7fairseq_6cps_dp_6sampford_sample(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_logp, PyArrayObject *__pyx_v_selected_inds, int __pyx_v_k) {
long __pyx_v_n;
CYTHON_UNUSED PyArrayObject *__pyx_v_thresholds = 0;
PyArrayObject *__pyx_v_log_weights = 0;
PyArrayObject *__pyx_v_log_prob_filtered = 0;
PyArrayObject *__pyx_v_subset_sum_product_probs = 0;
CYTHON_UNUSED int __pyx_v_to_pick_number;
PyArrayObject *__pyx_v_log_inclusion_probs = 0;
PyObject *__pyx_v_inclusion_probs = NULL;
int __pyx_v_j;
int __pyx_v_b;
PyObject *__pyx_v_samples_idx = NULL;
PyObject *__pyx_v_selected_incs = NULL;
PyObject *__pyx_v_counter = NULL;
PyObject *__pyx_v_replacement = NULL;
PyObject *__pyx_v_tmp = NULL;
__Pyx_LocalBuf_ND __pyx_pybuffernd_log_inclusion_probs;
__Pyx_Buffer __pyx_pybuffer_log_inclusion_probs;
__Pyx_LocalBuf_ND __pyx_pybuffernd_log_prob_filtered;
__Pyx_Buffer __pyx_pybuffer_log_prob_filtered;
__Pyx_LocalBuf_ND __pyx_pybuffernd_log_weights;
__Pyx_Buffer __pyx_pybuffer_log_weights;
__Pyx_LocalBuf_ND __pyx_pybuffernd_logp;
__Pyx_Buffer __pyx_pybuffer_logp;
__Pyx_LocalBuf_ND __pyx_pybuffernd_selected_inds;
__Pyx_Buffer __pyx_pybuffer_selected_inds;
__Pyx_LocalBuf_ND __pyx_pybuffernd_subset_sum_product_probs;
__Pyx_Buffer __pyx_pybuffer_subset_sum_product_probs;
__Pyx_LocalBuf_ND __pyx_pybuffernd_thresholds;
__Pyx_Buffer __pyx_pybuffer_thresholds;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
long __pyx_t_3;
long __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyArrayObject *__pyx_t_10 = NULL;
PyArrayObject *__pyx_t_11 = NULL;
PyObject *__pyx_t_12 = NULL;
PyObject *__pyx_t_13 = NULL;
PyObject *__pyx_t_14 = NULL;
PyArrayObject *__pyx_t_15 = NULL;
PyArrayObject *__pyx_t_16 = NULL;
PyArrayObject *__pyx_t_17 = NULL;
int __pyx_t_18;
PyObject *(*__pyx_t_19)(PyObject *);
int __pyx_t_20;
Py_ssize_t __pyx_t_21;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("sampford_sample", 0);
__pyx_pybuffer_thresholds.pybuffer.buf = NULL;
__pyx_pybuffer_thresholds.refcount = 0;
__pyx_pybuffernd_thresholds.data = NULL;
__pyx_pybuffernd_thresholds.rcbuffer = &__pyx_pybuffer_thresholds;
__pyx_pybuffer_log_weights.pybuffer.buf = NULL;
__pyx_pybuffer_log_weights.refcount = 0;
__pyx_pybuffernd_log_weights.data = NULL;
__pyx_pybuffernd_log_weights.rcbuffer = &__pyx_pybuffer_log_weights;
__pyx_pybuffer_log_prob_filtered.pybuffer.buf = NULL;
__pyx_pybuffer_log_prob_filtered.refcount = 0;
__pyx_pybuffernd_log_prob_filtered.data = NULL;
__pyx_pybuffernd_log_prob_filtered.rcbuffer = &__pyx_pybuffer_log_prob_filtered;
__pyx_pybuffer_subset_sum_product_probs.pybuffer.buf = NULL;
__pyx_pybuffer_subset_sum_product_probs.refcount = 0;
__pyx_pybuffernd_subset_sum_product_probs.data = NULL;
__pyx_pybuffernd_subset_sum_product_probs.rcbuffer = &__pyx_pybuffer_subset_sum_product_probs;
__pyx_pybuffer_log_inclusion_probs.pybuffer.buf = NULL;
__pyx_pybuffer_log_inclusion_probs.refcount = 0;
__pyx_pybuffernd_log_inclusion_probs.data = NULL;
__pyx_pybuffernd_log_inclusion_probs.rcbuffer = &__pyx_pybuffer_log_inclusion_probs;
__pyx_pybuffer_logp.pybuffer.buf = NULL;
__pyx_pybuffer_logp.refcount = 0;
__pyx_pybuffernd_logp.data = NULL;
__pyx_pybuffernd_logp.rcbuffer = &__pyx_pybuffer_logp;
__pyx_pybuffer_selected_inds.pybuffer.buf = NULL;
__pyx_pybuffer_selected_inds.refcount = 0;
__pyx_pybuffernd_selected_inds.data = NULL;
__pyx_pybuffernd_selected_inds.rcbuffer = &__pyx_pybuffer_selected_inds;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_logp.rcbuffer->pybuffer, (PyObject*)__pyx_v_logp, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 178, __pyx_L1_error)
}
__pyx_pybuffernd_logp.diminfo[0].strides = __pyx_pybuffernd_logp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_logp.diminfo[0].shape = __pyx_pybuffernd_logp.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_selected_inds.rcbuffer->pybuffer, (PyObject*)__pyx_v_selected_inds, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_int_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 178, __pyx_L1_error)
}
__pyx_pybuffernd_selected_inds.diminfo[0].strides = __pyx_pybuffernd_selected_inds.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_selected_inds.diminfo[0].shape = __pyx_pybuffernd_selected_inds.rcbuffer->pybuffer.shape[0];
/* "fairseq/cps_dp.pyx":179
* @cython.wraparound(False)
* def sampford_sample(np.ndarray[DTYPE_t, ndim=1] logp, np.ndarray[DTYPE_int_t, ndim=1] selected_inds, int k):
* cdef long n = len(logp) # <<<<<<<<<<<<<<
* k = min(n, k)
*
*/
__pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_logp)); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 179, __pyx_L1_error)
__pyx_v_n = __pyx_t_1;
/* "fairseq/cps_dp.pyx":180
* def sampford_sample(np.ndarray[DTYPE_t, ndim=1] logp, np.ndarray[DTYPE_int_t, ndim=1] selected_inds, int k):
* cdef long n = len(logp)
* k = min(n, k) # <<<<<<<<<<<<<<
*
* cdef np.ndarray[DTYPE_t, ndim=1] thresholds = np.log(np.random.uniform(size=n))
*/
__pyx_t_2 = __pyx_v_k;
__pyx_t_3 = __pyx_v_n;
if (((__pyx_t_2 < __pyx_t_3) != 0)) {
__pyx_t_4 = __pyx_t_2;
} else {
__pyx_t_4 = __pyx_t_3;
}
__pyx_v_k = __pyx_t_4;
/* "fairseq/cps_dp.pyx":182
* k = min(n, k)
*
* cdef np.ndarray[DTYPE_t, ndim=1] thresholds = np.log(np.random.uniform(size=n)) # <<<<<<<<<<<<<<
*
* cdef np.ndarray[DTYPE_t, ndim=1] log_weights # using odds approximation as weights
*/
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_log); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_random); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_uniform); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_8 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_9 = __Pyx_PyInt_From_long(__pyx_v_n); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
if (PyDict_SetItem(__pyx_t_8, __pyx_n_s_size, __pyx_t_9) < 0) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_empty_tuple, __pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_5 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_9) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_9);
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 182, __pyx_L1_error)
__pyx_t_10 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_thresholds.rcbuffer->pybuffer, (PyObject*)__pyx_t_10, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
__pyx_v_thresholds = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_thresholds.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 182, __pyx_L1_error)
} else {__pyx_pybuffernd_thresholds.diminfo[0].strides = __pyx_pybuffernd_thresholds.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_thresholds.diminfo[0].shape = __pyx_pybuffernd_thresholds.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_10 = 0;
__pyx_v_thresholds = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
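/* Editor's note (hand-written, not Cython output): thresholds stores one
 * negative log-space coin, log(Uniform(0,1)), per candidate. Its declaration
 * above is marked CYTHON_UNUSED: sampford_sample never reads it again, and the
 * acceptance test quoted around .pyx line 167 operates on the analogous array
 * allocated inside sample(). */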
/* "fairseq/cps_dp.pyx":186
* cdef np.ndarray[DTYPE_t, ndim=1] log_weights # using odds approximation as weights
* cdef np.ndarray[DTYPE_t, ndim=1] log_prob_filtered
* log_prob_filtered = logp.copy() # <<<<<<<<<<<<<<
* log_prob_filtered[log_prob_filtered > 0.99] = 0.99 # clipping in order to prevent NaN generation
* log_weights = log_prob_filtered - np.array(list(map(log1mexp, log_prob_filtered)))
*/
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_logp), __pyx_n_s_copy); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 186, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_5 = (__pyx_t_9) ? __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_9) : __Pyx_PyObject_CallNoArg(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 186, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 186, __pyx_L1_error)
__pyx_t_11 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_prob_filtered.rcbuffer->pybuffer);
__pyx_t_2 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_log_prob_filtered.rcbuffer->pybuffer, (PyObject*)__pyx_t_11, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_2 < 0)) {
PyErr_Fetch(&__pyx_t_12, &__pyx_t_13, &__pyx_t_14);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_log_prob_filtered.rcbuffer->pybuffer, (PyObject*)__pyx_v_log_prob_filtered, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_14);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_12, __pyx_t_13, __pyx_t_14);
}
__pyx_t_12 = __pyx_t_13 = __pyx_t_14 = 0;
}
__pyx_pybuffernd_log_prob_filtered.diminfo[0].strides = __pyx_pybuffernd_log_prob_filtered.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_log_prob_filtered.diminfo[0].shape = __pyx_pybuffernd_log_prob_filtered.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 186, __pyx_L1_error)
}
__pyx_t_11 = 0;
__pyx_v_log_prob_filtered = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "fairseq/cps_dp.pyx":187
* cdef np.ndarray[DTYPE_t, ndim=1] log_prob_filtered
* log_prob_filtered = logp.copy()
* log_prob_filtered[log_prob_filtered > 0.99] = 0.99 # clipping in order to prevent NaN generation # <<<<<<<<<<<<<<
* log_weights = log_prob_filtered - np.array(list(map(log1mexp, log_prob_filtered)))
*
*/
__pyx_t_5 = PyObject_RichCompare(((PyObject *)__pyx_v_log_prob_filtered), __pyx_float_0_99, Py_GT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 187, __pyx_L1_error)
if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_log_prob_filtered), __pyx_t_5, __pyx_float_0_99) < 0)) __PYX_ERR(0, 187, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
/* "fairseq/cps_dp.pyx":188
* log_prob_filtered = logp.copy()
* log_prob_filtered[log_prob_filtered > 0.99] = 0.99 # clipping in order to prevent NaN generation
* log_weights = log_prob_filtered - np.array(list(map(log1mexp, log_prob_filtered))) # <<<<<<<<<<<<<<
*
* cdef long i
*/
__Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 188, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_array); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 188, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = __Pyx_CFunc_DTYPE__t____DTYPE__t___to_py(__pyx_f_7fairseq_6cps_dp_log1mexp); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 188, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 188, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_7);
__Pyx_INCREF(((PyObject *)__pyx_v_log_prob_filtered));
__Pyx_GIVEREF(((PyObject *)__pyx_v_log_prob_filtered));
PyTuple_SET_ITEM(__pyx_t_8, 1, ((PyObject *)__pyx_v_log_prob_filtered));
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_map, __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 188, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_8 = PySequence_List(__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 188, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_9);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_9, function);
}
}
__pyx_t_5 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_9, __pyx_t_7, __pyx_t_8) : __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_8);
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 188, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = PyNumber_Subtract(((PyObject *)__pyx_v_log_prob_filtered), __pyx_t_5); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 188, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_9) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_9, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 188, __pyx_L1_error)
__pyx_t_15 = ((PyArrayObject *)__pyx_t_9);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_weights.rcbuffer->pybuffer);
__pyx_t_2 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_log_weights.rcbuffer->pybuffer, (PyObject*)__pyx_t_15, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_2 < 0)) {
PyErr_Fetch(&__pyx_t_14, &__pyx_t_13, &__pyx_t_12);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_log_weights.rcbuffer->pybuffer, (PyObject*)__pyx_v_log_weights, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_14); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_12);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_14, __pyx_t_13, __pyx_t_12);
}
__pyx_t_14 = __pyx_t_13 = __pyx_t_12 = 0;
}
__pyx_pybuffernd_log_weights.diminfo[0].strides = __pyx_pybuffernd_log_weights.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_log_weights.diminfo[0].shape = __pyx_pybuffernd_log_weights.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 188, __pyx_L1_error)
}
__pyx_t_15 = 0;
__pyx_v_log_weights = ((PyArrayObject *)__pyx_t_9);
__pyx_t_9 = 0;
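/* Editor's note (hand-written, not Cython output): log_weights = logp -
 * log1mexp(logp) is the log-odds transform log(p / (1 - p)); see the
 * sketch_log1mexp illustration inserted before this function. Per the source
 * comment on .pyx line 187, the 0.99 clip caps the log-probabilities fed to
 * log1mexp so as to limit NaN generation. */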
/* "fairseq/cps_dp.pyx":195
* cdef int to_pick_number
* cdef np.ndarray[DTYPE_t, ndim=1] log_inclusion_probs
* to_pick_number = k # <<<<<<<<<<<<<<
* subset_sum_product_probs = calc_normalization(log_weights, k)
* log_inclusion_probs = calc_log_inclusion_probs(log_weights, subset_sum_product_probs, k)
*/
__pyx_v_to_pick_number = __pyx_v_k;
/* "fairseq/cps_dp.pyx":196
* cdef np.ndarray[DTYPE_t, ndim=1] log_inclusion_probs
* to_pick_number = k
* subset_sum_product_probs = calc_normalization(log_weights, k) # <<<<<<<<<<<<<<
* log_inclusion_probs = calc_log_inclusion_probs(log_weights, subset_sum_product_probs, k)
*
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_calc_normalization); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 196, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_8 = __Pyx_PyInt_From_int(__pyx_v_k); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 196, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_7 = NULL;
__pyx_t_2 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_2 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, ((PyObject *)__pyx_v_log_weights), __pyx_t_8};
__pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_2, 2+__pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 196, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, ((PyObject *)__pyx_v_log_weights), __pyx_t_8};
__pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_2, 2+__pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 196, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
} else
#endif
{
__pyx_t_6 = PyTuple_New(2+__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 196, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_INCREF(((PyObject *)__pyx_v_log_weights));
__Pyx_GIVEREF(((PyObject *)__pyx_v_log_weights));
PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_2, ((PyObject *)__pyx_v_log_weights));
__Pyx_GIVEREF(__pyx_t_8);
PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_2, __pyx_t_8);
__pyx_t_8 = 0;
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_6, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 196, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_9) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_9, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 196, __pyx_L1_error)
__pyx_t_16 = ((PyArrayObject *)__pyx_t_9);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer);
__pyx_t_2 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer, (PyObject*)__pyx_t_16, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_2 < 0)) {
PyErr_Fetch(&__pyx_t_12, &__pyx_t_13, &__pyx_t_14);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer, (PyObject*)__pyx_v_subset_sum_product_probs, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_14);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_12, __pyx_t_13, __pyx_t_14);
}
__pyx_t_12 = __pyx_t_13 = __pyx_t_14 = 0;
}
__pyx_pybuffernd_subset_sum_product_probs.diminfo[0].strides = __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_subset_sum_product_probs.diminfo[0].shape = __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_subset_sum_product_probs.diminfo[1].strides = __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_subset_sum_product_probs.diminfo[1].shape = __pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 196, __pyx_L1_error)
}
__pyx_t_16 = 0;
__pyx_v_subset_sum_product_probs = ((PyArrayObject *)__pyx_t_9);
__pyx_t_9 = 0;
/* "fairseq/cps_dp.pyx":197
* to_pick_number = k
* subset_sum_product_probs = calc_normalization(log_weights, k)
* log_inclusion_probs = calc_log_inclusion_probs(log_weights, subset_sum_product_probs, k) # <<<<<<<<<<<<<<
*
* inclusion_probs = log_inclusion_probs.copy()
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_calc_log_inclusion_probs); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 197, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_k); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 197, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
__pyx_t_2 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_2 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[4] = {__pyx_t_8, ((PyObject *)__pyx_v_log_weights), ((PyObject *)__pyx_v_subset_sum_product_probs), __pyx_t_6};
__pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_2, 3+__pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 197, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[4] = {__pyx_t_8, ((PyObject *)__pyx_v_log_weights), ((PyObject *)__pyx_v_subset_sum_product_probs), __pyx_t_6};
__pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_2, 3+__pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 197, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_7 = PyTuple_New(3+__pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 197, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
if (__pyx_t_8) {
__Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_8); __pyx_t_8 = NULL;
}
__Pyx_INCREF(((PyObject *)__pyx_v_log_weights));
__Pyx_GIVEREF(((PyObject *)__pyx_v_log_weights));
PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_2, ((PyObject *)__pyx_v_log_weights));
__Pyx_INCREF(((PyObject *)__pyx_v_subset_sum_product_probs));
__Pyx_GIVEREF(((PyObject *)__pyx_v_subset_sum_product_probs));
PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_2, ((PyObject *)__pyx_v_subset_sum_product_probs));
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_7, 2+__pyx_t_2, __pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 197, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_9) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_9, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 197, __pyx_L1_error)
__pyx_t_17 = ((PyArrayObject *)__pyx_t_9);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer);
__pyx_t_2 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer, (PyObject*)__pyx_t_17, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_2 < 0)) {
PyErr_Fetch(&__pyx_t_14, &__pyx_t_13, &__pyx_t_12);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer, (PyObject*)__pyx_v_log_inclusion_probs, &__Pyx_TypeInfo_nn___pyx_t_7fairseq_6cps_dp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_14); Py_XDECREF(__pyx_t_13); Py_XDECREF(__pyx_t_12);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_14, __pyx_t_13, __pyx_t_12);
}
__pyx_t_14 = __pyx_t_13 = __pyx_t_12 = 0;
}
__pyx_pybuffernd_log_inclusion_probs.diminfo[0].strides = __pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_log_inclusion_probs.diminfo[0].shape = __pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 197, __pyx_L1_error)
}
__pyx_t_17 = 0;
__pyx_v_log_inclusion_probs = ((PyArrayObject *)__pyx_t_9);
__pyx_t_9 = 0;
/* "fairseq/cps_dp.pyx":199
* log_inclusion_probs = calc_log_inclusion_probs(log_weights, subset_sum_product_probs, k)
*
* inclusion_probs = log_inclusion_probs.copy() # <<<<<<<<<<<<<<
* inclusion_probs[inclusion_probs==-inf] = -60
* inclusion_probs = inclusion_probs/np.linalg.norm(inclusion_probs,ord=1)
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_log_inclusion_probs), __pyx_n_s_copy); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 199, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_7 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_9 = (__pyx_t_7) ? __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_7) : __Pyx_PyObject_CallNoArg(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 199, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_inclusion_probs = __pyx_t_9;
__pyx_t_9 = 0;
/* "fairseq/cps_dp.pyx":200
*
* inclusion_probs = log_inclusion_probs.copy()
* inclusion_probs[inclusion_probs==-inf] = -60 # <<<<<<<<<<<<<<
* inclusion_probs = inclusion_probs/np.linalg.norm(inclusion_probs,ord=1)
* inclusion_probs = np.exp(inclusion_probs)
*/
__Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_inf); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 200, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_5 = PyNumber_Negative(__pyx_t_9); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 200, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = PyObject_RichCompare(__pyx_v_inclusion_probs, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_9); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 200, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(PyObject_SetItem(__pyx_v_inclusion_probs, __pyx_t_9, __pyx_int_neg_60) < 0)) __PYX_ERR(0, 200, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "fairseq/cps_dp.pyx":201
* inclusion_probs = log_inclusion_probs.copy()
* inclusion_probs[inclusion_probs==-inf] = -60
* inclusion_probs = inclusion_probs/np.linalg.norm(inclusion_probs,ord=1) # <<<<<<<<<<<<<<
* inclusion_probs = np.exp(inclusion_probs)
* inclusion_probs = np.nan_to_num(inclusion_probs)
*/
__Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 201, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_linalg); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 201, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_norm); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 201, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 201, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_inclusion_probs);
__Pyx_GIVEREF(__pyx_v_inclusion_probs);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_inclusion_probs);
__pyx_t_7 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 201, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_ord, __pyx_int_1) < 0) __PYX_ERR(0, 201, __pyx_L1_error)
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_5, __pyx_t_7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 201, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyNumber_Divide(__pyx_v_inclusion_probs, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 201, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF_SET(__pyx_v_inclusion_probs, __pyx_t_7);
__pyx_t_7 = 0;
/* "fairseq/cps_dp.pyx":202
* inclusion_probs[inclusion_probs==-inf] = -60
* inclusion_probs = inclusion_probs/np.linalg.norm(inclusion_probs,ord=1)
* inclusion_probs = np.exp(inclusion_probs) # <<<<<<<<<<<<<<
* inclusion_probs = np.nan_to_num(inclusion_probs)
* if(np.sum(inclusion_probs)) == 0:
*/
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 202, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_exp); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 202, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_7 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_v_inclusion_probs) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_v_inclusion_probs);
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 202, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF_SET(__pyx_v_inclusion_probs, __pyx_t_7);
__pyx_t_7 = 0;
/* "fairseq/cps_dp.pyx":203
* inclusion_probs = inclusion_probs/np.linalg.norm(inclusion_probs,ord=1)
* inclusion_probs = np.exp(inclusion_probs)
* inclusion_probs = np.nan_to_num(inclusion_probs) # <<<<<<<<<<<<<<
* if(np.sum(inclusion_probs)) == 0:
* inclusion_probs = np.random.random(size = k)
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 203, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_nan_to_num); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 203, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
}
}
__pyx_t_7 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_5, __pyx_v_inclusion_probs) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_v_inclusion_probs);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 203, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF_SET(__pyx_v_inclusion_probs, __pyx_t_7);
__pyx_t_7 = 0;
/* "fairseq/cps_dp.pyx":204
* inclusion_probs = np.exp(inclusion_probs)
* inclusion_probs = np.nan_to_num(inclusion_probs)
* if(np.sum(inclusion_probs)) == 0: # <<<<<<<<<<<<<<
* inclusion_probs = np.random.random(size = k)
* inclusion_probs = inclusion_probs/np.linalg.norm(inclusion_probs,ord=1)
*/
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 204, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_sum); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 204, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_7 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_v_inclusion_probs) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_v_inclusion_probs);
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 204, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyInt_EqObjC(__pyx_t_7, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 204, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_18 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_18 < 0)) __PYX_ERR(0, 204, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_18) {
/* "fairseq/cps_dp.pyx":205
* inclusion_probs = np.nan_to_num(inclusion_probs)
* if(np.sum(inclusion_probs)) == 0:
* inclusion_probs = np.random.random(size = k) # <<<<<<<<<<<<<<
* inclusion_probs = inclusion_probs/np.linalg.norm(inclusion_probs,ord=1)
* else:
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 205, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_random); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 205, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_random); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 205, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 205, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_k); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 205, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_size, __pyx_t_6) < 0) __PYX_ERR(0, 205, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_empty_tuple, __pyx_t_7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 205, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF_SET(__pyx_v_inclusion_probs, __pyx_t_6);
__pyx_t_6 = 0;
/* "fairseq/cps_dp.pyx":206
* if(np.sum(inclusion_probs)) == 0:
* inclusion_probs = np.random.random(size = k)
* inclusion_probs = inclusion_probs/np.linalg.norm(inclusion_probs,ord=1) # <<<<<<<<<<<<<<
* else:
* inclusion_probs = inclusion_probs/np.linalg.norm(inclusion_probs,ord=1)
*/
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 206, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_linalg); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 206, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_norm); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 206, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 206, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_INCREF(__pyx_v_inclusion_probs);
__Pyx_GIVEREF(__pyx_v_inclusion_probs);
PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_v_inclusion_probs);
__pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 206, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_ord, __pyx_int_1) < 0) __PYX_ERR(0, 206, __pyx_L1_error)
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_7, __pyx_t_5); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 206, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyNumber_Divide(__pyx_v_inclusion_probs, __pyx_t_9); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 206, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF_SET(__pyx_v_inclusion_probs, __pyx_t_5);
__pyx_t_5 = 0;
/* "fairseq/cps_dp.pyx":204
* inclusion_probs = np.exp(inclusion_probs)
* inclusion_probs = np.nan_to_num(inclusion_probs)
* if(np.sum(inclusion_probs)) == 0: # <<<<<<<<<<<<<<
* inclusion_probs = np.random.random(size = k)
* inclusion_probs = inclusion_probs/np.linalg.norm(inclusion_probs,ord=1)
*/
goto __pyx_L3;
}
/* "fairseq/cps_dp.pyx":208
* inclusion_probs = inclusion_probs/np.linalg.norm(inclusion_probs,ord=1)
* else:
* inclusion_probs = inclusion_probs/np.linalg.norm(inclusion_probs,ord=1) # <<<<<<<<<<<<<<
*
* cdef int j
*/
/*else*/ {
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 208, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_linalg); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 208, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_norm); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 208, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 208, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_INCREF(__pyx_v_inclusion_probs);
__Pyx_GIVEREF(__pyx_v_inclusion_probs);
PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_v_inclusion_probs);
__pyx_t_7 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 208, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_ord, __pyx_int_1) < 0) __PYX_ERR(0, 208, __pyx_L1_error)
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, __pyx_t_7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 208, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyNumber_Divide(__pyx_v_inclusion_probs, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 208, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF_SET(__pyx_v_inclusion_probs, __pyx_t_7);
__pyx_t_7 = 0;
}
__pyx_L3:;
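/* Editor's note (hand-written, not Cython output): both branches above
 * L1-normalize inclusion_probs; the if-branch additionally replaces it with
 * fresh Uniform(0,1) draws of length k when every exponentiated inclusion
 * probability has underflowed to zero (the -inf entries having been floored
 * at -60 beforehand). */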
/* "fairseq/cps_dp.pyx":213
* cdef int b
*
* if(k==len(logp)): # <<<<<<<<<<<<<<
* samples_idx, selected_incs = sample(logp, selected_inds, k)
* else:
*/
__pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_logp)); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 213, __pyx_L1_error)
__pyx_t_18 = ((__pyx_v_k == __pyx_t_1) != 0);
if (__pyx_t_18) {
/* "fairseq/cps_dp.pyx":214
*
* if(k==len(logp)):
* samples_idx, selected_incs = sample(logp, selected_inds, k) # <<<<<<<<<<<<<<
* else:
* j = np.random.choice(selected_inds, 1, p=inclusion_probs)
*/
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_sample); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 214, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_9 = __Pyx_PyInt_From_int(__pyx_v_k); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 214, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_5 = NULL;
__pyx_t_2 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_2 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[4] = {__pyx_t_5, ((PyObject *)__pyx_v_logp), ((PyObject *)__pyx_v_selected_inds), __pyx_t_9};
__pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_2, 3+__pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 214, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[4] = {__pyx_t_5, ((PyObject *)__pyx_v_logp), ((PyObject *)__pyx_v_selected_inds), __pyx_t_9};
__pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_2, 3+__pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 214, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else
#endif
{
__pyx_t_8 = PyTuple_New(3+__pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 214, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_5) {
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
}
__Pyx_INCREF(((PyObject *)__pyx_v_logp));
__Pyx_GIVEREF(((PyObject *)__pyx_v_logp));
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_2, ((PyObject *)__pyx_v_logp));
__Pyx_INCREF(((PyObject *)__pyx_v_selected_inds));
__Pyx_GIVEREF(((PyObject *)__pyx_v_selected_inds));
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_2, ((PyObject *)__pyx_v_selected_inds));
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_8, 2+__pyx_t_2, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 214, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if ((likely(PyTuple_CheckExact(__pyx_t_7))) || (PyList_CheckExact(__pyx_t_7))) {
PyObject* sequence = __pyx_t_7;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(0, 214, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
if (likely(PyTuple_CheckExact(sequence))) {
__pyx_t_6 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_8 = PyTuple_GET_ITEM(sequence, 1);
} else {
__pyx_t_6 = PyList_GET_ITEM(sequence, 0);
__pyx_t_8 = PyList_GET_ITEM(sequence, 1);
}
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(__pyx_t_8);
#else
__pyx_t_6 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 214, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 214, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
#endif
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
} else {
Py_ssize_t index = -1;
__pyx_t_9 = PyObject_GetIter(__pyx_t_7); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 214, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_19 = Py_TYPE(__pyx_t_9)->tp_iternext;
index = 0; __pyx_t_6 = __pyx_t_19(__pyx_t_9); if (unlikely(!__pyx_t_6)) goto __pyx_L5_unpacking_failed;
__Pyx_GOTREF(__pyx_t_6);
index = 1; __pyx_t_8 = __pyx_t_19(__pyx_t_9); if (unlikely(!__pyx_t_8)) goto __pyx_L5_unpacking_failed;
__Pyx_GOTREF(__pyx_t_8);
if (__Pyx_IternextUnpackEndCheck(__pyx_t_19(__pyx_t_9), 2) < 0) __PYX_ERR(0, 214, __pyx_L1_error)
__pyx_t_19 = NULL;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L6_unpacking_done;
__pyx_L5_unpacking_failed:;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_19 = NULL;
if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
__PYX_ERR(0, 214, __pyx_L1_error)
__pyx_L6_unpacking_done:;
}
__pyx_v_samples_idx = __pyx_t_6;
__pyx_t_6 = 0;
__pyx_v_selected_incs = __pyx_t_8;
__pyx_t_8 = 0;
/* "fairseq/cps_dp.pyx":213
* cdef int b
*
* if(k==len(logp)): # <<<<<<<<<<<<<<
* samples_idx, selected_incs = sample(logp, selected_inds, k)
* else:
*/
goto __pyx_L4;
}
/* "fairseq/cps_dp.pyx":216
* samples_idx, selected_incs = sample(logp, selected_inds, k)
* else:
* j = np.random.choice(selected_inds, 1, p=inclusion_probs) # <<<<<<<<<<<<<<
* samples_idx, selected_incs = sample(logp, selected_inds, k-1)
*
*/
/*else*/ {
__Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 216, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_random); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 216, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_choice); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 216, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 216, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(((PyObject *)__pyx_v_selected_inds));
__Pyx_GIVEREF(((PyObject *)__pyx_v_selected_inds));
PyTuple_SET_ITEM(__pyx_t_8, 0, ((PyObject *)__pyx_v_selected_inds));
__Pyx_INCREF(__pyx_int_1);
__Pyx_GIVEREF(__pyx_int_1);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_int_1);
__pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 216, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_p, __pyx_v_inclusion_probs) < 0) __PYX_ERR(0, 216, __pyx_L1_error)
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, __pyx_t_6); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 216, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_9); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 216, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_j = __pyx_t_2;
/* "fairseq/cps_dp.pyx":217
* else:
* j = np.random.choice(selected_inds, 1, p=inclusion_probs)
* samples_idx, selected_incs = sample(logp, selected_inds, k-1) # <<<<<<<<<<<<<<
*
* b = 0
*/
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_sample); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 217, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = __Pyx_PyInt_From_long((__pyx_v_k - 1)); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 217, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_7 = NULL;
__pyx_t_2 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_2 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[4] = {__pyx_t_7, ((PyObject *)__pyx_v_logp), ((PyObject *)__pyx_v_selected_inds), __pyx_t_8};
__pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_2, 3+__pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 217, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[4] = {__pyx_t_7, ((PyObject *)__pyx_v_logp), ((PyObject *)__pyx_v_selected_inds), __pyx_t_8};
__pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_2, 3+__pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 217, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
} else
#endif
{
__pyx_t_5 = PyTuple_New(3+__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 217, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_INCREF(((PyObject *)__pyx_v_logp));
__Pyx_GIVEREF(((PyObject *)__pyx_v_logp));
PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_2, ((PyObject *)__pyx_v_logp));
__Pyx_INCREF(((PyObject *)__pyx_v_selected_inds));
__Pyx_GIVEREF(((PyObject *)__pyx_v_selected_inds));
PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_2, ((PyObject *)__pyx_v_selected_inds));
__Pyx_GIVEREF(__pyx_t_8);
PyTuple_SET_ITEM(__pyx_t_5, 2+__pyx_t_2, __pyx_t_8);
__pyx_t_8 = 0;
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_5, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 217, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if ((likely(PyTuple_CheckExact(__pyx_t_9))) || (PyList_CheckExact(__pyx_t_9))) {
PyObject* sequence = __pyx_t_9;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(0, 217, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
if (likely(PyTuple_CheckExact(sequence))) {
__pyx_t_6 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
} else {
__pyx_t_6 = PyList_GET_ITEM(sequence, 0);
__pyx_t_5 = PyList_GET_ITEM(sequence, 1);
}
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_6 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 217, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 217, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
Py_ssize_t index = -1;
__pyx_t_8 = PyObject_GetIter(__pyx_t_9); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 217, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_19 = Py_TYPE(__pyx_t_8)->tp_iternext;
index = 0; __pyx_t_6 = __pyx_t_19(__pyx_t_8); if (unlikely(!__pyx_t_6)) goto __pyx_L7_unpacking_failed;
__Pyx_GOTREF(__pyx_t_6);
index = 1; __pyx_t_5 = __pyx_t_19(__pyx_t_8); if (unlikely(!__pyx_t_5)) goto __pyx_L7_unpacking_failed;
__Pyx_GOTREF(__pyx_t_5);
if (__Pyx_IternextUnpackEndCheck(__pyx_t_19(__pyx_t_8), 2) < 0) __PYX_ERR(0, 217, __pyx_L1_error)
__pyx_t_19 = NULL;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L8_unpacking_done;
__pyx_L7_unpacking_failed:;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_19 = NULL;
if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
__PYX_ERR(0, 217, __pyx_L1_error)
__pyx_L8_unpacking_done:;
}
__pyx_v_samples_idx = __pyx_t_6;
__pyx_t_6 = 0;
__pyx_v_selected_incs = __pyx_t_5;
__pyx_t_5 = 0;
/* "fairseq/cps_dp.pyx":219
* samples_idx, selected_incs = sample(logp, selected_inds, k-1)
*
* b = 0 # <<<<<<<<<<<<<<
* if j in samples_idx:
* b = 1
*/
__pyx_v_b = 0;
/* "fairseq/cps_dp.pyx":220
*
* b = 0
* if j in samples_idx: # <<<<<<<<<<<<<<
* b = 1
*
*/
__pyx_t_9 = __Pyx_PyInt_From_int(__pyx_v_j); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 220, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_18 = (__Pyx_PySequence_ContainsTF(__pyx_t_9, __pyx_v_samples_idx, Py_EQ)); if (unlikely(__pyx_t_18 < 0)) __PYX_ERR(0, 220, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_20 = (__pyx_t_18 != 0);
if (__pyx_t_20) {
/* "fairseq/cps_dp.pyx":221
* b = 0
* if j in samples_idx:
* b = 1 # <<<<<<<<<<<<<<
*
* counter = 0
*/
__pyx_v_b = 1;
/* "fairseq/cps_dp.pyx":220
*
* b = 0
* if j in samples_idx: # <<<<<<<<<<<<<<
* b = 1
*
*/
}
/* "fairseq/cps_dp.pyx":223
* b = 1
*
* counter = 0 # <<<<<<<<<<<<<<
* while b==1:
* counter+=1
*/
__Pyx_INCREF(__pyx_int_0);
__pyx_v_counter = __pyx_int_0;
/* "fairseq/cps_dp.pyx":224
*
* counter = 0
* while b==1: # <<<<<<<<<<<<<<
* counter+=1
* samples_idx, selected_incs = sample(logp, selected_inds, k-1)
*/
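/* Cython lowers the .pyx `while b==1` into an infinite C loop with an
 * inverted test-and-break at the top, as seen immediately below. */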
while (1) {
__pyx_t_20 = ((__pyx_v_b == 1) != 0);
if (!__pyx_t_20) break;
/* "fairseq/cps_dp.pyx":225
* counter = 0
* while b==1:
* counter+=1 # <<<<<<<<<<<<<<
* samples_idx, selected_incs = sample(logp, selected_inds, k-1)
* if j in samples_idx:
*/
__pyx_t_9 = __Pyx_PyInt_AddObjC(__pyx_v_counter, __pyx_int_1, 1, 1, 0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 225, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF_SET(__pyx_v_counter, __pyx_t_9);
__pyx_t_9 = 0;
/* "fairseq/cps_dp.pyx":226
* while b==1:
* counter+=1
* samples_idx, selected_incs = sample(logp, selected_inds, k-1) # <<<<<<<<<<<<<<
* if j in samples_idx:
* b = 1
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_sample); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyInt_From_long((__pyx_v_k - 1)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
__pyx_t_2 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_2 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[4] = {__pyx_t_8, ((PyObject *)__pyx_v_logp), ((PyObject *)__pyx_v_selected_inds), __pyx_t_6};
__pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_2, 3+__pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 226, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[4] = {__pyx_t_8, ((PyObject *)__pyx_v_logp), ((PyObject *)__pyx_v_selected_inds), __pyx_t_6};
__pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_2, 3+__pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 226, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_7 = PyTuple_New(3+__pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
if (__pyx_t_8) {
__Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_8); __pyx_t_8 = NULL;
}
__Pyx_INCREF(((PyObject *)__pyx_v_logp));
__Pyx_GIVEREF(((PyObject *)__pyx_v_logp));
PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_2, ((PyObject *)__pyx_v_logp));
__Pyx_INCREF(((PyObject *)__pyx_v_selected_inds));
__Pyx_GIVEREF(((PyObject *)__pyx_v_selected_inds));
PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_2, ((PyObject *)__pyx_v_selected_inds));
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_7, 2+__pyx_t_2, __pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if ((likely(PyTuple_CheckExact(__pyx_t_9))) || (PyList_CheckExact(__pyx_t_9))) {
PyObject* sequence = __pyx_t_9;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(0, 226, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
if (likely(PyTuple_CheckExact(sequence))) {
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_7 = PyTuple_GET_ITEM(sequence, 1);
} else {
__pyx_t_5 = PyList_GET_ITEM(sequence, 0);
__pyx_t_7 = PyList_GET_ITEM(sequence, 1);
}
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
#else
__pyx_t_5 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_7 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
Py_ssize_t index = -1;
__pyx_t_6 = PyObject_GetIter(__pyx_t_9); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 226, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_19 = Py_TYPE(__pyx_t_6)->tp_iternext;
index = 0; __pyx_t_5 = __pyx_t_19(__pyx_t_6); if (unlikely(!__pyx_t_5)) goto __pyx_L12_unpacking_failed;
__Pyx_GOTREF(__pyx_t_5);
index = 1; __pyx_t_7 = __pyx_t_19(__pyx_t_6); if (unlikely(!__pyx_t_7)) goto __pyx_L12_unpacking_failed;
__Pyx_GOTREF(__pyx_t_7);
if (__Pyx_IternextUnpackEndCheck(__pyx_t_19(__pyx_t_6), 2) < 0) __PYX_ERR(0, 226, __pyx_L1_error)
__pyx_t_19 = NULL;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
goto __pyx_L13_unpacking_done;
__pyx_L12_unpacking_failed:;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_19 = NULL;
if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
__PYX_ERR(0, 226, __pyx_L1_error)
__pyx_L13_unpacking_done:;
}
__Pyx_DECREF_SET(__pyx_v_samples_idx, __pyx_t_5);
__pyx_t_5 = 0;
__Pyx_DECREF_SET(__pyx_v_selected_incs, __pyx_t_7);
__pyx_t_7 = 0;
/* "fairseq/cps_dp.pyx":227
* counter+=1
* samples_idx, selected_incs = sample(logp, selected_inds, k-1)
* if j in samples_idx: # <<<<<<<<<<<<<<
* b = 1
* if counter>=10:
*/
__pyx_t_9 = __Pyx_PyInt_From_int(__pyx_v_j); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 227, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_20 = (__Pyx_PySequence_ContainsTF(__pyx_t_9, __pyx_v_samples_idx, Py_EQ)); if (unlikely(__pyx_t_20 < 0)) __PYX_ERR(0, 227, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_18 = (__pyx_t_20 != 0);
if (__pyx_t_18) {
/* "fairseq/cps_dp.pyx":228
* samples_idx, selected_incs = sample(logp, selected_inds, k-1)
* if j in samples_idx:
* b = 1 # <<<<<<<<<<<<<<
* if counter>=10:
* replacement = selected_inds[selected_inds!=j]
*/
__pyx_v_b = 1;
/* "fairseq/cps_dp.pyx":229
* if j in samples_idx:
* b = 1
* if counter>=10: # <<<<<<<<<<<<<<
* replacement = selected_inds[selected_inds!=j]
* tmp = np.random.randint(low=0,high=len(replacement)-1)
*/
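/* Retry cap: after ten redraws that failed to exclude j, the quoted source
 * stops resampling and instead picks a substitute index from selected_inds
 * with j masked out. Note that np.random.randint treats `high` as
 * exclusive, so high=len(replacement)-1 draws from [0, len(replacement)-2]
 * and the last element of `replacement` can never be selected; this looks
 * like an off-by-one in the original .pyx (high=len(replacement) was
 * presumably intended). */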
__pyx_t_9 = PyObject_RichCompare(__pyx_v_counter, __pyx_int_10, Py_GE); __Pyx_XGOTREF(__pyx_t_9); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 229, __pyx_L1_error)
__pyx_t_18 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_18 < 0)) __PYX_ERR(0, 229, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
if (__pyx_t_18) {
/* "fairseq/cps_dp.pyx":230
* b = 1
* if counter>=10:
* replacement = selected_inds[selected_inds!=j] # <<<<<<<<<<<<<<
* tmp = np.random.randint(low=0,high=len(replacement)-1)
* np.append(samples_idx,replacement[tmp])
*/
__pyx_t_9 = __Pyx_PyInt_From_int(__pyx_v_j); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 230, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_7 = PyObject_RichCompare(((PyObject *)__pyx_v_selected_inds), __pyx_t_9, Py_NE); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 230, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_selected_inds), __pyx_t_7); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 230, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF_SET(__pyx_v_replacement, __pyx_t_9);
__pyx_t_9 = 0;
/* "fairseq/cps_dp.pyx":231
* if counter>=10:
* replacement = selected_inds[selected_inds!=j]
* tmp = np.random.randint(low=0,high=len(replacement)-1) # <<<<<<<<<<<<<<
* np.append(samples_idx,replacement[tmp])
* np.append(selected_incs,log_inclusion_probs[tmp])
*/
__Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 231, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_random); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 231, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_randint); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 231, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 231, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_low, __pyx_int_0) < 0) __PYX_ERR(0, 231, __pyx_L1_error)
__pyx_t_1 = PyObject_Length(__pyx_v_replacement); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 231, __pyx_L1_error)
__pyx_t_5 = PyInt_FromSsize_t((__pyx_t_1 - 1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 231, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_high, __pyx_t_5) < 0) __PYX_ERR(0, 231, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_empty_tuple, __pyx_t_7); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 231, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF_SET(__pyx_v_tmp, __pyx_t_5);
__pyx_t_5 = 0;
/* "fairseq/cps_dp.pyx":232
* replacement = selected_inds[selected_inds!=j]
* tmp = np.random.randint(low=0,high=len(replacement)-1)
* np.append(samples_idx,replacement[tmp]) # <<<<<<<<<<<<<<
* np.append(selected_incs,log_inclusion_probs[tmp])
* b=0
*/
__Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 232, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_append); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 232, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_GetItem(__pyx_v_replacement, __pyx_v_tmp); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 232, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_6 = NULL;
__pyx_t_2 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_9);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_9, function);
__pyx_t_2 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_9)) {
PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_v_samples_idx, __pyx_t_7};
__pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_2, 2+__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 232, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) {
PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_v_samples_idx, __pyx_t_7};
__pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-__pyx_t_2, 2+__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 232, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
} else
#endif
{
__pyx_t_8 = PyTuple_New(2+__pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 232, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_6) {
__Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL;
}
__Pyx_INCREF(__pyx_v_samples_idx);
__Pyx_GIVEREF(__pyx_v_samples_idx);
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_2, __pyx_v_samples_idx);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_2, __pyx_t_7);
__pyx_t_7 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 232, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
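/* Caveat (numpy semantics): np.append returns a new array and never
 * mutates its argument. The generated code above DECREFs the call result
 * (__pyx_t_5) without storing it, so this call, faithful to the quoted
 * .pyx, has no effect on samples_idx. The intended idiom would be,
 * illustratively:
 *
 *     samples_idx   = np.append(samples_idx, replacement[tmp])
 *     selected_incs = np.append(selected_incs, log_inclusion_probs[tmp])
 */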
/* "fairseq/cps_dp.pyx":233
* tmp = np.random.randint(low=0,high=len(replacement)-1)
* np.append(samples_idx,replacement[tmp])
* np.append(selected_incs,log_inclusion_probs[tmp]) # <<<<<<<<<<<<<<
* b=0
* else:
*/
__Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 233, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_append); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 233, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_log_inclusion_probs), __pyx_v_tmp); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 233, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_7 = NULL;
__pyx_t_2 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_8))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_8);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_8, function);
__pyx_t_2 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_8)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_v_selected_incs, __pyx_t_9};
__pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_8, __pyx_temp+1-__pyx_t_2, 2+__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 233, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_8)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_v_selected_incs, __pyx_t_9};
__pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_8, __pyx_temp+1-__pyx_t_2, 2+__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 233, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else
#endif
{
__pyx_t_6 = PyTuple_New(2+__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 233, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_INCREF(__pyx_v_selected_incs);
__Pyx_GIVEREF(__pyx_v_selected_incs);
PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_2, __pyx_v_selected_incs);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_2, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 233, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
/* "fairseq/cps_dp.pyx":234
* np.append(samples_idx,replacement[tmp])
* np.append(selected_incs,log_inclusion_probs[tmp])
* b=0 # <<<<<<<<<<<<<<
* else:
* b = 0
*/
__pyx_v_b = 0;
/* "fairseq/cps_dp.pyx":229
* if j in samples_idx:
* b = 1
* if counter>=10: # <<<<<<<<<<<<<<
* replacement = selected_inds[selected_inds!=j]
* tmp = np.random.randint(low=0,high=len(replacement)-1)
*/
}
/* "fairseq/cps_dp.pyx":227
* counter+=1
* samples_idx, selected_incs = sample(logp, selected_inds, k-1)
* if j in samples_idx: # <<<<<<<<<<<<<<
* b = 1
* if counter>=10:
*/
goto __pyx_L14;
}
/* "fairseq/cps_dp.pyx":236
* b=0
* else:
* b = 0 # <<<<<<<<<<<<<<
* np.append(samples_idx,j)
* np.append(selected_incs,log_inclusion_probs[j])
*/
/*else*/ {
__pyx_v_b = 0;
/* "fairseq/cps_dp.pyx":237
* else:
* b = 0
* np.append(samples_idx,j) # <<<<<<<<<<<<<<
* np.append(selected_incs,log_inclusion_probs[j])
*
*/
__Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_np); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_append); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_8 = __Pyx_PyInt_From_int(__pyx_v_j); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_9 = NULL;
__pyx_t_2 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_2 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_v_samples_idx, __pyx_t_8};
__pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_2, 2+__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 237, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_v_samples_idx, __pyx_t_8};
__pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_2, 2+__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 237, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
} else
#endif
{
__pyx_t_7 = PyTuple_New(2+__pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
if (__pyx_t_9) {
__Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_9); __pyx_t_9 = NULL;
}
__Pyx_INCREF(__pyx_v_samples_idx);
__Pyx_GIVEREF(__pyx_v_samples_idx);
PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_2, __pyx_v_samples_idx);
__Pyx_GIVEREF(__pyx_t_8);
PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_2, __pyx_t_8);
__pyx_t_8 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_7, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
/* "fairseq/cps_dp.pyx":238
* b = 0
* np.append(samples_idx,j)
* np.append(selected_incs,log_inclusion_probs[j]) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 238, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_append); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 238, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_21 = __pyx_v_j;
__pyx_t_6 = PyFloat_FromDouble((*__Pyx_BufPtrStrided1d(__pyx_t_7fairseq_6cps_dp_DTYPE_t *, __pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_log_inclusion_probs.diminfo[0].strides))); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 238, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
__pyx_t_2 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
__pyx_t_2 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_7)) {
PyObject *__pyx_temp[3] = {__pyx_t_8, __pyx_v_selected_incs, __pyx_t_6};
__pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_2, 2+__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 238, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) {
PyObject *__pyx_temp[3] = {__pyx_t_8, __pyx_v_selected_incs, __pyx_t_6};
__pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_2, 2+__pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 238, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(2+__pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 238, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_8) {
__Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8); __pyx_t_8 = NULL;
}
__Pyx_INCREF(__pyx_v_selected_incs);
__Pyx_GIVEREF(__pyx_v_selected_incs);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_2, __pyx_v_selected_incs);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_2, __pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 238, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
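/* The same np.append caveat applies to the two calls in this else branch:
 * their results are discarded, so j is never actually added to samples_idx.
 * Judging from the quoted source alone, sampford_sample therefore returns
 * the k-1 indices produced by sample() rather than k. */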
}
__pyx_L14:;
}
}
__pyx_L4:;
/* "fairseq/cps_dp.pyx":243
*
*
* samples_idx = samples_idx.astype(int) # <<<<<<<<<<<<<<
*
* return np.asarray(samples_idx), np.asarray(selected_incs)
*/
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_samples_idx, __pyx_n_s_astype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 243, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_5 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_9, ((PyObject *)(&PyInt_Type))) : __Pyx_PyObject_CallOneArg(__pyx_t_7, ((PyObject *)(&PyInt_Type)));
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 243, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF_SET(__pyx_v_samples_idx, __pyx_t_5);
__pyx_t_5 = 0;
/* "fairseq/cps_dp.pyx":245
* samples_idx = samples_idx.astype(int)
*
* return np.asarray(samples_idx), np.asarray(selected_incs) # <<<<<<<<<<<<<<
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 245, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_asarray); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 245, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_9);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_9, function);
}
}
__pyx_t_5 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_9, __pyx_t_7, __pyx_v_samples_idx) : __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_v_samples_idx);
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 245, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 245, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_asarray); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 245, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
}
}
__pyx_t_9 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_7, __pyx_v_selected_incs) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_v_selected_incs);
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 245, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 245, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_9);
__pyx_t_5 = 0;
__pyx_t_9 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "fairseq/cps_dp.pyx":178
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def sampford_sample(np.ndarray[DTYPE_t, ndim=1] logp, np.ndarray[DTYPE_int_t, ndim=1] selected_inds, int k): # <<<<<<<<<<<<<<
* cdef long n = len(logp)
* k = min(n, k)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_9);
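/* Error path: releasing the buffer views below can run arbitrary Python
 * code via DECREF, which must not clobber the in-flight exception. The
 * pending error state is therefore stashed with __Pyx_ErrFetch and
 * reinstated with __Pyx_ErrRestore around the __Pyx_SafeReleaseBuffer
 * calls. */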
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_prob_filtered.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_weights.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_logp.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_selected_inds.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_thresholds.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("fairseq.cps_dp.sampford_sample", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_inclusion_probs.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_prob_filtered.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_log_weights.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_logp.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_selected_inds.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_subset_sum_product_probs.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_thresholds.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_thresholds);
__Pyx_XDECREF((PyObject *)__pyx_v_log_weights);
__Pyx_XDECREF((PyObject *)__pyx_v_log_prob_filtered);
__Pyx_XDECREF((PyObject *)__pyx_v_subset_sum_product_probs);
__Pyx_XDECREF((PyObject *)__pyx_v_log_inclusion_probs);
__Pyx_XDECREF(__pyx_v_inclusion_probs);
__Pyx_XDECREF(__pyx_v_samples_idx);
__Pyx_XDECREF(__pyx_v_selected_incs);
__Pyx_XDECREF(__pyx_v_counter);
__Pyx_XDECREF(__pyx_v_replacement);
__Pyx_XDECREF(__pyx_v_tmp);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":735
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":736
*
* cdef inline object PyArray_MultiIterNew1(a):
* return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew2(a, b):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 736, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":735
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
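/* PyArray_MultiIterNew1 above and the New2..New5 variants that follow are
 * arity-only variations generated from numpy/__init__.pxd; each simply
 * forwards to PyArray_MultiIterNew, the C-API constructor behind numpy's
 * broadcasting iterator. The Python-level counterpart is np.broadcast,
 * e.g.:
 *
 *     import numpy as np
 *     it = np.broadcast(np.arange(3)[:, None], np.arange(4))
 *     print(it.shape)   # (3, 4)
 */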
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":738
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":739
*
* cdef inline object PyArray_MultiIterNew2(a, b):
* return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 739, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":738
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":741
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":742
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 742, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":741
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":744
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":745
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 745, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":744
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":747
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":748
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<<
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 748, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":747
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":750
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("PyDataType_SHAPE", 0);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":751
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<<
* return <tuple>d.subarray.shape
* else:
*/
__pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0);
if (__pyx_t_1) {
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":752
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape # <<<<<<<<<<<<<<
* else:
* return ()
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape));
__pyx_r = ((PyObject*)__pyx_v_d->subarray->shape);
goto __pyx_L0;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":751
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<<
* return <tuple>d.subarray.shape
* else:
*/
}
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":754
* return <tuple>d.subarray.shape
* else:
* return () # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_empty_tuple);
__pyx_r = __pyx_empty_tuple;
goto __pyx_L0;
}
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":750
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
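/* PyDataType_SHAPE returns the fixed sub-shape for subarray dtypes
 * (e.g. np.dtype(('f8', (2, 3))).shape == (2, 3)) and an empty tuple for
 * plain scalar dtypes, mirroring numpy's descr.shape semantics. */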
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":929
* int _import_umath() except -1
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base)
*/
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("set_array_base", 0);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":930
*
* cdef inline void set_array_base(ndarray arr, object base):
* Py_INCREF(base) # important to do this before stealing the reference below! # <<<<<<<<<<<<<<
* PyArray_SetBaseObject(arr, base)
*
*/
Py_INCREF(__pyx_v_base);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":931
* cdef inline void set_array_base(ndarray arr, object base):
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<<
*
* cdef inline object get_array_base(ndarray arr):
*/
(void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base));
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":929
* int _import_umath() except -1
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":933
* PyArray_SetBaseObject(arr, base)
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* base = PyArray_BASE(arr)
* if base is NULL:
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
PyObject *__pyx_v_base;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("get_array_base", 0);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":934
*
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr) # <<<<<<<<<<<<<<
* if base is NULL:
* return None
*/
__pyx_v_base = PyArray_BASE(__pyx_v_arr);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":935
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr)
* if base is NULL: # <<<<<<<<<<<<<<
* return None
* return <object>base
*/
__pyx_t_1 = ((__pyx_v_base == NULL) != 0);
if (__pyx_t_1) {
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":936
* base = PyArray_BASE(arr)
* if base is NULL:
* return None # <<<<<<<<<<<<<<
* return <object>base
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":935
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr)
* if base is NULL: # <<<<<<<<<<<<<<
* return None
* return <object>base
*/
}
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":937
* if base is NULL:
* return None
* return <object>base # <<<<<<<<<<<<<<
*
* # Versions of the import_* functions which are more suitable for
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_base));
__pyx_r = ((PyObject *)__pyx_v_base);
goto __pyx_L0;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":933
* PyArray_SetBaseObject(arr, base)
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* base = PyArray_BASE(arr)
* if base is NULL:
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
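/* set_array_base / get_array_base implement numpy's view-ownership
 * pattern: a view keeps the array that owns its memory alive through the
 * base pointer (PyArray_SetBaseObject steals a reference, hence the
 * Py_INCREF beforehand), and get_array_base returns None when the array
 * owns its own data. */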
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":941
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* __pyx_import_array()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("import_array", 0);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":942
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* __pyx_import_array()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":943
* cdef inline int import_array() except -1:
* try:
* __pyx_import_array() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import")
*/
__pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 943, __pyx_L3_error)
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":942
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* __pyx_import_array()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":944
* try:
* __pyx_import_array()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.multiarray failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 944, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":945
* __pyx_import_array()
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_umath() except -1:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 945, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 945, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":942
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* __pyx_import_array()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":941
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* __pyx_import_array()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
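/* import_array() must run before any PyArray_* call; it populates the
 * numpy C-API function table. Recent Cython versions emit a call to it
 * automatically in the module-init code of any module that cimports
 * numpy, which is presumably how this file relies on it. */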
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":947
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("import_umath", 0);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":948
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":949
* cdef inline int import_umath() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 949, __pyx_L3_error)
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":948
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":950
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 950, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":951
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_ufunc() except -1:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 951, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 951, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":948
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":947
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":953
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("import_ufunc", 0);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":954
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":955
* cdef inline int import_ufunc() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 955, __pyx_L3_error)
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":954
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":956
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 956, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":957
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef extern from *:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 957, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 957, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":954
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":953
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
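/* Note that import_umath() and import_ufunc() both call _import_umath():
 * ufuncs live in numpy.core.umath, so there is no separate ufunc table to
 * initialize and the two wrappers are effectively interchangeable. */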
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":967
*
*
* cdef inline bint is_timedelta64_object(object obj): # <<<<<<<<<<<<<<
* """
* Cython equivalent of `isinstance(obj, np.timedelta64)`
*/
static CYTHON_INLINE int __pyx_f_5numpy_is_timedelta64_object(PyObject *__pyx_v_obj) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_timedelta64_object", 0);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":979
* bool
* """
* return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = PyObject_TypeCheck(__pyx_v_obj, (&PyTimedeltaArrType_Type));
goto __pyx_L0;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":967
*
*
* cdef inline bint is_timedelta64_object(object obj): # <<<<<<<<<<<<<<
* """
* Cython equivalent of `isinstance(obj, np.timedelta64)`
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":982
*
*
* cdef inline bint is_datetime64_object(object obj): # <<<<<<<<<<<<<<
* """
* Cython equivalent of `isinstance(obj, np.datetime64)`
*/
static CYTHON_INLINE int __pyx_f_5numpy_is_datetime64_object(PyObject *__pyx_v_obj) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_datetime64_object", 0);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":994
* bool
* """
* return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = PyObject_TypeCheck(__pyx_v_obj, (&PyDatetimeArrType_Type));
goto __pyx_L0;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":982
*
*
* cdef inline bint is_datetime64_object(object obj): # <<<<<<<<<<<<<<
* """
* Cython equivalent of `isinstance(obj, np.datetime64)`
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":997
*
*
* cdef inline npy_datetime get_datetime64_value(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the int64 value underlying scalar numpy datetime64 object
*/
static CYTHON_INLINE npy_datetime __pyx_f_5numpy_get_datetime64_value(PyObject *__pyx_v_obj) {
npy_datetime __pyx_r;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1004
* also needed. That can be found using `get_datetime64_unit`.
* """
* return (<PyDatetimeScalarObject*>obj).obval # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = ((PyDatetimeScalarObject *)__pyx_v_obj)->obval;
goto __pyx_L0;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":997
*
*
* cdef inline npy_datetime get_datetime64_value(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the int64 value underlying scalar numpy datetime64 object
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1007
*
*
* cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the int64 value underlying scalar numpy timedelta64 object
*/
static CYTHON_INLINE npy_timedelta __pyx_f_5numpy_get_timedelta64_value(PyObject *__pyx_v_obj) {
npy_timedelta __pyx_r;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1011
* returns the int64 value underlying scalar numpy timedelta64 object
* """
* return (<PyTimedeltaScalarObject*>obj).obval # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = ((PyTimedeltaScalarObject *)__pyx_v_obj)->obval;
goto __pyx_L0;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1007
*
*
* cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the int64 value underlying scalar numpy timedelta64 object
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1014
*
*
* cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the unit part of the dtype for a numpy datetime64 object.
*/
static CYTHON_INLINE NPY_DATETIMEUNIT __pyx_f_5numpy_get_datetime64_unit(PyObject *__pyx_v_obj) {
NPY_DATETIMEUNIT __pyx_r;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1018
* returns the unit part of the dtype for a numpy datetime64 object.
* """
* return <NPY_DATETIMEUNIT>(<PyDatetimeScalarObject*>obj).obmeta.base # <<<<<<<<<<<<<<
*/
__pyx_r = ((NPY_DATETIMEUNIT)((PyDatetimeScalarObject *)__pyx_v_obj)->obmeta.base);
goto __pyx_L0;
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":1014
*
*
* cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the unit part of the dtype for a numpy datetime64 object.
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
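/* Example, assuming NumPy's standard NPY_DATETIMEUNIT enum: a scalar built
 * as np.datetime64("2005-02-25") reports NPY_FR_D (day resolution), while
 * np.datetime64("2005-02-25T03:30") reports NPY_FR_m (minutes). The enum
 * value, not a unit string, is what this accessor returns.
 */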
/* "cfunc.to_py":65
* @cname("__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py")
* cdef object __Pyx_CFunc_DTYPE__t____DTYPE__t___to_py(DTYPE_t (*f)(DTYPE_t) except *):
* def wrap(DTYPE_t x): # <<<<<<<<<<<<<<
* """wrap(x: 'DTYPE_t') -> 'DTYPE_t'"""
* return f(x)
*/
/* Python wrapper */
static PyObject *__pyx_pw_11cfunc_dot_to_py_40__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py_1wrap(PyObject *__pyx_self, PyObject *__pyx_arg_x); /*proto*/
static char __pyx_doc_11cfunc_dot_to_py_40__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py_wrap[] = "wrap(x: 'DTYPE_t') -> 'DTYPE_t'";
static PyMethodDef __pyx_mdef_11cfunc_dot_to_py_40__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py_1wrap = {"wrap", (PyCFunction)__pyx_pw_11cfunc_dot_to_py_40__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py_1wrap, METH_O, __pyx_doc_11cfunc_dot_to_py_40__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py_wrap};
static PyObject *__pyx_pw_11cfunc_dot_to_py_40__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py_1wrap(PyObject *__pyx_self, PyObject *__pyx_arg_x) {
__pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_v_x;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("wrap (wrapper)", 0);
assert(__pyx_arg_x); {
__pyx_v_x = __pyx_PyFloat_AsDouble(__pyx_arg_x); if (unlikely((__pyx_v_x == ((npy_float64)-1)) && PyErr_Occurred())) __PYX_ERR(2, 65, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
__Pyx_AddTraceback("cfunc.to_py.__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py.wrap", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_11cfunc_dot_to_py_40__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py_wrap(__pyx_self, ((__pyx_t_7fairseq_6cps_dp_DTYPE_t)__pyx_v_x));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_11cfunc_dot_to_py_40__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py_wrap(PyObject *__pyx_self, __pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_v_x) {
struct __pyx_obj___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py *__pyx_cur_scope;
struct __pyx_obj___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py *__pyx_outer_scope;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__pyx_t_7fairseq_6cps_dp_DTYPE_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("wrap", 0);
__pyx_outer_scope = (struct __pyx_obj___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py *) __Pyx_CyFunction_GetClosure(__pyx_self);
__pyx_cur_scope = __pyx_outer_scope;
/* "cfunc.to_py":67
* def wrap(DTYPE_t x):
* """wrap(x: 'DTYPE_t') -> 'DTYPE_t'"""
* return f(x) # <<<<<<<<<<<<<<
* return wrap
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_cur_scope->__pyx_v_f(__pyx_v_x); if (unlikely(__pyx_t_1 == ((__pyx_t_7fairseq_6cps_dp_DTYPE_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 67, __pyx_L1_error)
__pyx_t_2 = PyFloat_FromDouble(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "cfunc.to_py":65
* @cname("__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py")
* cdef object __Pyx_CFunc_DTYPE__t____DTYPE__t___to_py(DTYPE_t (*f)(DTYPE_t) except *):
* def wrap(DTYPE_t x): # <<<<<<<<<<<<<<
* """wrap(x: 'DTYPE_t') -> 'DTYPE_t'"""
* return f(x)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("cfunc.to_py.__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py.wrap", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cfunc.to_py":64
*
* @cname("__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py")
* cdef object __Pyx_CFunc_DTYPE__t____DTYPE__t___to_py(DTYPE_t (*f)(DTYPE_t) except *): # <<<<<<<<<<<<<<
* def wrap(DTYPE_t x):
* """wrap(x: 'DTYPE_t') -> 'DTYPE_t'"""
*/
static PyObject *__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py(__pyx_t_7fairseq_6cps_dp_DTYPE_t (*__pyx_v_f)(__pyx_t_7fairseq_6cps_dp_DTYPE_t)) {
struct __pyx_obj___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py *__pyx_cur_scope;
PyObject *__pyx_v_wrap = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py", 0);
__pyx_cur_scope = (struct __pyx_obj___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py *)__pyx_tp_new___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py(__pyx_ptype___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py, __pyx_empty_tuple, NULL);
if (unlikely(!__pyx_cur_scope)) {
__pyx_cur_scope = ((struct __pyx_obj___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py *)Py_None);
__Pyx_INCREF(Py_None);
__PYX_ERR(2, 64, __pyx_L1_error)
} else {
__Pyx_GOTREF(__pyx_cur_scope);
}
__pyx_cur_scope->__pyx_v_f = __pyx_v_f;
/* "cfunc.to_py":65
* @cname("__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py")
* cdef object __Pyx_CFunc_DTYPE__t____DTYPE__t___to_py(DTYPE_t (*f)(DTYPE_t) except *):
* def wrap(DTYPE_t x): # <<<<<<<<<<<<<<
* """wrap(x: 'DTYPE_t') -> 'DTYPE_t'"""
* return f(x)
*/
__pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_11cfunc_dot_to_py_40__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py_1wrap, 0, __pyx_n_s_Pyx_CFunc_DTYPE__t____DTYPE__t, ((PyObject*)__pyx_cur_scope), __pyx_n_s_cfunc_to_py, __pyx_d, ((PyObject *)__pyx_codeobj__6)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 65, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_wrap = __pyx_t_1;
__pyx_t_1 = 0;
/* "cfunc.to_py":68
* """wrap(x: 'DTYPE_t') -> 'DTYPE_t'"""
* return f(x)
* return wrap # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_wrap);
__pyx_r = __pyx_v_wrap;
goto __pyx_L0;
/* "cfunc.to_py":64
*
* @cname("__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py")
* cdef object __Pyx_CFunc_DTYPE__t____DTYPE__t___to_py(DTYPE_t (*f)(DTYPE_t) except *): # <<<<<<<<<<<<<<
* def wrap(DTYPE_t x):
* """wrap(x: 'DTYPE_t') -> 'DTYPE_t'"""
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("cfunc.to_py.__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_wrap);
__Pyx_DECREF(((PyObject *)__pyx_cur_scope));
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
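/* The three functions above turn a raw C function pointer into a Python
 * callable: the pointer is stashed in a heap-allocated scope struct, and the
 * CyFunction closure "wrap" recovers it through __Pyx_CyFunction_GetClosure
 * at call time. A hypothetical caller (my_c_func is illustrative, not a name
 * from this module) would do:
 *
 *     PyObject *py_callable =
 *         __Pyx_CFunc_DTYPE__t____DTYPE__t___to_py(my_c_func);
 *     // calling py_callable(3.0) from Python now invokes my_c_func(3.0)
 */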
static struct __pyx_obj___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py *__pyx_freelist___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py[8];
static int __pyx_freecount___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py = 0;
static PyObject *__pyx_tp_new___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
PyObject *o;
if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py)))) {
o = (PyObject*)__pyx_freelist___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py[--__pyx_freecount___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py];
memset(o, 0, sizeof(struct __pyx_obj___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py));
(void) PyObject_INIT(o, t);
} else {
o = (*t->tp_alloc)(t, 0);
if (unlikely(!o)) return 0;
}
return o;
}
static void __pyx_tp_dealloc___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py(PyObject *o) {
if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py)))) {
__pyx_freelist___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py[__pyx_freecount___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py++] = ((struct __pyx_obj___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py *)o);
} else {
(*Py_TYPE(o)->tp_free)(o);
}
}
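/* Allocation note: the two functions above implement a small object
 * freelist. Up to 8 dead scope structs are parked in the static array
 * instead of being freed; tp_new then recycles them with memset +
 * PyObject_INIT, skipping tp_alloc entirely. The sizeof() guards keep any
 * subclass with a larger layout off the freelist. Condensed fast path:
 *
 *     if (freecount > 0 && t->tp_basicsize == sizeof(scope_struct)) {
 *         o = freelist[--freecount];            // reuse a parked object
 *         memset(o, 0, sizeof(scope_struct));   // wipe stale fields
 *         PyObject_INIT(o, t);
 *     }
 */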
static PyTypeObject __pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py = {
PyVarObject_HEAD_INIT(0, 0)
"fairseq.cps_dp.__pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py", /*tp_name*/
sizeof(struct __pyx_obj___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
0, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_cps_dp(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec_cps_dp},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"cps_dp",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
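/* Module-definition note: with CYTHON_PEP489_MULTI_PHASE_INIT the module is
 * described by slots and m_size 0, so the interpreter drives import as two
 * phases per PEP 489:
 *
 *     create(spec)  ->  __pyx_pymod_create
 *     exec(module)  ->  __pyx_pymod_exec_cps_dp
 *
 * The single-phase fallback sets m_size to -1, marking a module with global
 * state that cannot be re-initialised.
 */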
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1},
{&__pyx_n_s_Pyx_CFunc_DTYPE__t____DTYPE__t, __pyx_k_Pyx_CFunc_DTYPE__t____DTYPE__t, sizeof(__pyx_k_Pyx_CFunc_DTYPE__t____DTYPE__t), 0, 0, 1, 1},
{&__pyx_n_s_append, __pyx_k_append, sizeof(__pyx_k_append), 0, 0, 1, 1},
{&__pyx_n_s_array, __pyx_k_array, sizeof(__pyx_k_array), 0, 0, 1, 1},
{&__pyx_n_s_asarray, __pyx_k_asarray, sizeof(__pyx_k_asarray), 0, 0, 1, 1},
{&__pyx_n_s_astype, __pyx_k_astype, sizeof(__pyx_k_astype), 0, 0, 1, 1},
{&__pyx_n_s_b, __pyx_k_b, sizeof(__pyx_k_b), 0, 0, 1, 1},
{&__pyx_n_s_calc_log_inclusion_probs, __pyx_k_calc_log_inclusion_probs, sizeof(__pyx_k_calc_log_inclusion_probs), 0, 0, 1, 1},
{&__pyx_n_s_calc_normalization, __pyx_k_calc_normalization, sizeof(__pyx_k_calc_normalization), 0, 0, 1, 1},
{&__pyx_n_s_cfunc_to_py, __pyx_k_cfunc_to_py, sizeof(__pyx_k_cfunc_to_py), 0, 0, 1, 1},
{&__pyx_n_s_choice, __pyx_k_choice, sizeof(__pyx_k_choice), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_n_s_copy, __pyx_k_copy, sizeof(__pyx_k_copy), 0, 0, 1, 1},
{&__pyx_n_s_counter, __pyx_k_counter, sizeof(__pyx_k_counter), 0, 0, 1, 1},
{&__pyx_n_s_dp, __pyx_k_dp, sizeof(__pyx_k_dp), 0, 0, 1, 1},
{&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
{&__pyx_n_s_exp, __pyx_k_exp, sizeof(__pyx_k_exp), 0, 0, 1, 1},
{&__pyx_n_s_fairseq_cps_dp, __pyx_k_fairseq_cps_dp, sizeof(__pyx_k_fairseq_cps_dp), 0, 0, 1, 1},
{&__pyx_kp_s_fairseq_cps_dp_pyx, __pyx_k_fairseq_cps_dp_pyx, sizeof(__pyx_k_fairseq_cps_dp_pyx), 0, 0, 1, 0},
{&__pyx_n_s_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 0, 1, 1},
{&__pyx_n_s_full, __pyx_k_full, sizeof(__pyx_k_full), 0, 0, 1, 1},
{&__pyx_n_s_high, __pyx_k_high, sizeof(__pyx_k_high), 0, 0, 1, 1},
{&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_inclusion_probs, __pyx_k_inclusion_probs, sizeof(__pyx_k_inclusion_probs), 0, 0, 1, 1},
{&__pyx_n_s_inf, __pyx_k_inf, sizeof(__pyx_k_inf), 0, 0, 1, 1},
{&__pyx_n_s_intermediate_res, __pyx_k_intermediate_res, sizeof(__pyx_k_intermediate_res), 0, 0, 1, 1},
{&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1},
{&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1},
{&__pyx_n_s_linalg, __pyx_k_linalg, sizeof(__pyx_k_linalg), 0, 0, 1, 1},
{&__pyx_n_s_log, __pyx_k_log, sizeof(__pyx_k_log), 0, 0, 1, 1},
{&__pyx_n_s_log_inclusion_probs, __pyx_k_log_inclusion_probs, sizeof(__pyx_k_log_inclusion_probs), 0, 0, 1, 1},
{&__pyx_n_s_log_prob_filtered, __pyx_k_log_prob_filtered, sizeof(__pyx_k_log_prob_filtered), 0, 0, 1, 1},
{&__pyx_n_s_log_weights, __pyx_k_log_weights, sizeof(__pyx_k_log_weights), 0, 0, 1, 1},
{&__pyx_n_s_logp, __pyx_k_logp, sizeof(__pyx_k_logp), 0, 0, 1, 1},
{&__pyx_n_s_logp_sliced, __pyx_k_logp_sliced, sizeof(__pyx_k_logp_sliced), 0, 0, 1, 1},
{&__pyx_n_s_low, __pyx_k_low, sizeof(__pyx_k_low), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_map, __pyx_k_map, sizeof(__pyx_k_map), 0, 0, 1, 1},
{&__pyx_n_s_n, __pyx_k_n, sizeof(__pyx_k_n), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_nan_to_num, __pyx_k_nan_to_num, sizeof(__pyx_k_nan_to_num), 0, 0, 1, 1},
{&__pyx_n_s_norm, __pyx_k_norm, sizeof(__pyx_k_norm), 0, 0, 1, 1},
{&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
{&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
{&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0},
{&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0},
{&__pyx_n_s_ord, __pyx_k_ord, sizeof(__pyx_k_ord), 0, 0, 1, 1},
{&__pyx_n_s_p, __pyx_k_p, sizeof(__pyx_k_p), 0, 0, 1, 1},
{&__pyx_n_s_r, __pyx_k_r, sizeof(__pyx_k_r), 0, 0, 1, 1},
{&__pyx_n_s_randint, __pyx_k_randint, sizeof(__pyx_k_randint), 0, 0, 1, 1},
{&__pyx_n_s_random, __pyx_k_random, sizeof(__pyx_k_random), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_remaining_subsetsum_product_prob, __pyx_k_remaining_subsetsum_product_prob, sizeof(__pyx_k_remaining_subsetsum_product_prob), 0, 0, 1, 1},
{&__pyx_n_s_replacement, __pyx_k_replacement, sizeof(__pyx_k_replacement), 0, 0, 1, 1},
{&__pyx_n_s_sampford_sample, __pyx_k_sampford_sample, sizeof(__pyx_k_sampford_sample), 0, 0, 1, 1},
{&__pyx_n_s_sample, __pyx_k_sample, sizeof(__pyx_k_sample), 0, 0, 1, 1},
{&__pyx_n_s_samples_idx, __pyx_k_samples_idx, sizeof(__pyx_k_samples_idx), 0, 0, 1, 1},
{&__pyx_n_s_selected_incs, __pyx_k_selected_incs, sizeof(__pyx_k_selected_incs), 0, 0, 1, 1},
{&__pyx_n_s_selected_inds, __pyx_k_selected_inds, sizeof(__pyx_k_selected_inds), 0, 0, 1, 1},
{&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
{&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
{&__pyx_n_s_subset_sum_product_probs, __pyx_k_subset_sum_product_probs, sizeof(__pyx_k_subset_sum_product_probs), 0, 0, 1, 1},
{&__pyx_n_s_sum, __pyx_k_sum, sizeof(__pyx_k_sum), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_n_s_thresh, __pyx_k_thresh, sizeof(__pyx_k_thresh), 0, 0, 1, 1},
{&__pyx_n_s_thresholds, __pyx_k_thresholds, sizeof(__pyx_k_thresholds), 0, 0, 1, 1},
{&__pyx_n_s_tmp, __pyx_k_tmp, sizeof(__pyx_k_tmp), 0, 0, 1, 1},
{&__pyx_n_s_to_pick_number, __pyx_k_to_pick_number, sizeof(__pyx_k_to_pick_number), 0, 0, 1, 1},
{&__pyx_n_s_uniform, __pyx_k_uniform, sizeof(__pyx_k_uniform), 0, 0, 1, 1},
{&__pyx_n_s_wrap, __pyx_k_wrap, sizeof(__pyx_k_wrap), 0, 0, 1, 1},
{&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 92, __pyx_L1_error)
__pyx_builtin_map = __Pyx_GetBuiltinName(__pyx_n_s_map); if (!__pyx_builtin_map) __PYX_ERR(0, 152, __pyx_L1_error)
__pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 945, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "fairseq/cps_dp.pyx":87
*
* subset_sum_product_probs = np.full((k + 1, n + 1), -np.inf, dtype=np.float64)
* subset_sum_product_probs[0, :] = 0. # <<<<<<<<<<<<<<
* cdef float intermediate_res
* cdef int r
*/
__pyx_slice_ = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice_)) __PYX_ERR(0, 87, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice_);
__Pyx_GIVEREF(__pyx_slice_);
__pyx_tuple__2 = PyTuple_Pack(2, __pyx_int_0, __pyx_slice_); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 87, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
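/* Constant-caching note: the slice and tuple built above are how the source
 * line subset_sum_product_probs[0, :] = 0. is compiled. The subscript
 * (0, slice(None, None, None)) is constructed once here and cached as
 * __pyx_tuple__2, so the function body never rebuilds it on each call.
 */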
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":945
* __pyx_import_array()
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_umath() except -1:
*/
__pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 945, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__3);
__Pyx_GIVEREF(__pyx_tuple__3);
/* "../../.local/lib/python3.8/site-packages/numpy/__init__.pxd":951
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_ufunc() except -1:
*/
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 951, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "cfunc.to_py":65
* @cname("__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py")
* cdef object __Pyx_CFunc_DTYPE__t____DTYPE__t___to_py(DTYPE_t (*f)(DTYPE_t) except *):
* def wrap(DTYPE_t x): # <<<<<<<<<<<<<<
* """wrap(x: 'DTYPE_t') -> 'DTYPE_t'"""
* return f(x)
*/
__pyx_tuple__5 = PyTuple_Pack(2, __pyx_n_s_x, __pyx_n_s_x); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(2, 65, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
__pyx_codeobj__6 = (PyObject*)__Pyx_PyCode_New(1, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__5, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_wrap, 65, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__6)) __PYX_ERR(2, 65, __pyx_L1_error)
/* "fairseq/cps_dp.pyx":75
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def calc_normalization(np.ndarray[DTYPE_t, ndim=1] logp_sliced, int k): # <<<<<<<<<<<<<<
* """
* This function calculates the normalization factor in CPS which is
*/
__pyx_tuple__7 = PyTuple_Pack(7, __pyx_n_s_logp_sliced, __pyx_n_s_k, __pyx_n_s_n, __pyx_n_s_subset_sum_product_probs, __pyx_n_s_intermediate_res, __pyx_n_s_r, __pyx_n_s_i); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(0, 75, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
__pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(2, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_fairseq_cps_dp_pyx, __pyx_n_s_calc_normalization, 75, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) __PYX_ERR(0, 75, __pyx_L1_error)
/* "fairseq/cps_dp.pyx":100
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def calc_log_inclusion_probs(np.ndarray[DTYPE_t, ndim=1] logp_sliced, # <<<<<<<<<<<<<<
* np.ndarray[DTYPE_t, ndim=2] subset_sum_product_probs, int k):
* """
*/
__pyx_tuple__9 = PyTuple_Pack(9, __pyx_n_s_logp_sliced, __pyx_n_s_subset_sum_product_probs, __pyx_n_s_k, __pyx_n_s_n, __pyx_n_s_dp, __pyx_n_s_log_inclusion_probs, __pyx_n_s_remaining_subsetsum_product_prob, __pyx_n_s_r, __pyx_n_s_i); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(0, 100, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
__pyx_codeobj__10 = (PyObject*)__Pyx_PyCode_New(3, 0, 9, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__9, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_fairseq_cps_dp_pyx, __pyx_n_s_calc_log_inclusion_probs, 100, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__10)) __PYX_ERR(0, 100, __pyx_L1_error)
/* "fairseq/cps_dp.pyx":133
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def sample(np.ndarray[DTYPE_t, ndim=1] logp, np.ndarray[DTYPE_int_t, ndim=1] selected_inds, int k): # <<<<<<<<<<<<<<
* """
* This function picks a sample of size k from candidates
*/
__pyx_tuple__11 = PyTuple_Pack(14, __pyx_n_s_logp, __pyx_n_s_selected_inds, __pyx_n_s_k, __pyx_n_s_n, __pyx_n_s_samples_idx, __pyx_n_s_selected_incs, __pyx_n_s_thresholds, __pyx_n_s_log_weights, __pyx_n_s_log_prob_filtered, __pyx_n_s_i, __pyx_n_s_subset_sum_product_probs, __pyx_n_s_thresh, __pyx_n_s_to_pick_number, __pyx_n_s_log_inclusion_probs); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(0, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
__pyx_codeobj__12 = (PyObject*)__Pyx_PyCode_New(3, 0, 14, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__11, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_fairseq_cps_dp_pyx, __pyx_n_s_sample, 133, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) __PYX_ERR(0, 133, __pyx_L1_error)
/* "fairseq/cps_dp.pyx":178
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def sampford_sample(np.ndarray[DTYPE_t, ndim=1] logp, np.ndarray[DTYPE_int_t, ndim=1] selected_inds, int k): # <<<<<<<<<<<<<<
* cdef long n = len(logp)
* k = min(n, k)
*/
__pyx_tuple__13 = PyTuple_Pack(20, __pyx_n_s_logp, __pyx_n_s_selected_inds, __pyx_n_s_k, __pyx_n_s_n, __pyx_n_s_thresholds, __pyx_n_s_log_weights, __pyx_n_s_log_prob_filtered, __pyx_n_s_i, __pyx_n_s_subset_sum_product_probs, __pyx_n_s_thresh, __pyx_n_s_to_pick_number, __pyx_n_s_log_inclusion_probs, __pyx_n_s_inclusion_probs, __pyx_n_s_j, __pyx_n_s_b, __pyx_n_s_samples_idx, __pyx_n_s_selected_incs, __pyx_n_s_counter, __pyx_n_s_replacement, __pyx_n_s_tmp); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 178, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_GIVEREF(__pyx_tuple__13);
__pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(3, 0, 20, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_fairseq_cps_dp_pyx, __pyx_n_s_sampford_sample, 178, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) __PYX_ERR(0, 178, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_float_0_ = PyFloat_FromDouble(0.); if (unlikely(!__pyx_float_0_)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_float_0_99 = PyFloat_FromDouble(0.99); if (unlikely(!__pyx_float_0_99)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_10 = PyInt_FromLong(10); if (unlikely(!__pyx_int_10)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_neg_60 = PyInt_FromLong(-60); if (unlikely(!__pyx_int_neg_60)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
if (PyType_Ready(&__pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py) < 0) __PYX_ERR(2, 64, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py.tp_dictoffset && __pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
}
__pyx_ptype___pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py = &__pyx_scope_struct____Pyx_CFunc_DTYPE__t____DTYPE__t___to_py;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type",
#if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
sizeof(PyTypeObject),
#else
sizeof(PyHeapTypeObject),
#endif
__Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(3, 9, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 200, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(1, 200, __pyx_L1_error)
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(1, 223, __pyx_L1_error)
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(1, 227, __pyx_L1_error)
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(1, 239, __pyx_L1_error)
__pyx_ptype_5numpy_generic = __Pyx_ImportType(__pyx_t_1, "numpy", "generic", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_generic) __PYX_ERR(1, 771, __pyx_L1_error)
__pyx_ptype_5numpy_number = __Pyx_ImportType(__pyx_t_1, "numpy", "number", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_number) __PYX_ERR(1, 773, __pyx_L1_error)
__pyx_ptype_5numpy_integer = __Pyx_ImportType(__pyx_t_1, "numpy", "integer", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_integer) __PYX_ERR(1, 775, __pyx_L1_error)
__pyx_ptype_5numpy_signedinteger = __Pyx_ImportType(__pyx_t_1, "numpy", "signedinteger", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_signedinteger) __PYX_ERR(1, 777, __pyx_L1_error)
__pyx_ptype_5numpy_unsignedinteger = __Pyx_ImportType(__pyx_t_1, "numpy", "unsignedinteger", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_unsignedinteger) __PYX_ERR(1, 779, __pyx_L1_error)
__pyx_ptype_5numpy_inexact = __Pyx_ImportType(__pyx_t_1, "numpy", "inexact", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_inexact) __PYX_ERR(1, 781, __pyx_L1_error)
__pyx_ptype_5numpy_floating = __Pyx_ImportType(__pyx_t_1, "numpy", "floating", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_floating) __PYX_ERR(1, 783, __pyx_L1_error)
__pyx_ptype_5numpy_complexfloating = __Pyx_ImportType(__pyx_t_1, "numpy", "complexfloating", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_complexfloating) __PYX_ERR(1, 785, __pyx_L1_error)
__pyx_ptype_5numpy_flexible = __Pyx_ImportType(__pyx_t_1, "numpy", "flexible", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_flexible) __PYX_ERR(1, 787, __pyx_L1_error)
__pyx_ptype_5numpy_character = __Pyx_ImportType(__pyx_t_1, "numpy", "character", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_character) __PYX_ERR(1, 789, __pyx_L1_error)
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(1, 827, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_RefNannyFinishContext();
return -1;
}
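/* Type-import note: __Pyx_ImportType checks each imported type's
 * tp_basicsize against the compile-time sizeof. A runtime struct smaller
 * than expected is always an error; _CheckSize_Ignore silently accepts a
 * larger one (only the checked prefix of ndarray, dtype, etc. is touched),
 * while _CheckSize_Warn emits a warning instead, keeping the module
 * importable across minor NumPy layout growth.
 */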
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
#ifndef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#elif PY_MAJOR_VERSION < 3
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
#define __Pyx_PyMODINIT_FUNC void
#endif
#else
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
#endif
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initcps_dp(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initcps_dp(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_cps_dp(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_cps_dp(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
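/* Why this guard exists: the module keeps C-level globals (__pyx_m, the
 * cached constants above), so it records the first interpreter that loads it
 * and rejects any other; importing from a sub-interpreter raises ImportError
 * rather than silently sharing state. On 3.7+ the check uses the stable
 * PyInterpreterState_GetID; older builds compare PyInterpreterState
 * pointers.
 */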
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
if (allow_none || value != Py_None) {
result = PyDict_SetItemString(moddict, to_name, value);
}
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
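/* Spec-copy sketch: this helper mirrors importlib's ModuleSpec onto the new
 * module's dict, e.g. spec.origin -> __file__ and spec.parent ->
 * __package__ (see the calls in __pyx_pymod_create below). A missing
 * attribute is tolerated (the AttributeError is cleared); None is rejected
 * only for __path__, where allow_none is 0.
 */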
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
static CYTHON_SMALL_CODE int __pyx_pymod_exec_cps_dp(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module 'cps_dp' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_cps_dp(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
PyEval_InitThreads();
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("cps_dp", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_fairseq__cps_dp) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "fairseq.cps_dp")) {
if (unlikely(PyDict_SetItemString(modules, "fairseq.cps_dp", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "fairseq/cps_dp.pyx":1
* import numpy as np # <<<<<<<<<<<<<<
* from libc.math cimport NAN
* from libc.math cimport isnan
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "fairseq/cps_dp.pyx":4
* from libc.math cimport NAN
* from libc.math cimport isnan
* from numpy import inf # <<<<<<<<<<<<<<
*
* cimport cython
*/
__pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_n_s_inf);
__Pyx_GIVEREF(__pyx_n_s_inf);
PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_inf);
__pyx_t_2 = __Pyx_Import(__pyx_n_s_numpy, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_inf); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_inf, __pyx_t_1) < 0) __PYX_ERR(0, 4, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "fairseq/cps_dp.pyx":75
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def calc_normalization(np.ndarray[DTYPE_t, ndim=1] logp_sliced, int k): # <<<<<<<<<<<<<<
* """
* This function calculates the normalization factor in CPS which is
*/
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7fairseq_6cps_dp_1calc_normalization, NULL, __pyx_n_s_fairseq_cps_dp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 75, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_calc_normalization, __pyx_t_2) < 0) __PYX_ERR(0, 75, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "fairseq/cps_dp.pyx":100
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def calc_log_inclusion_probs(np.ndarray[DTYPE_t, ndim=1] logp_sliced, # <<<<<<<<<<<<<<
* np.ndarray[DTYPE_t, ndim=2] subset_sum_product_probs, int k):
* """
*/
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7fairseq_6cps_dp_3calc_log_inclusion_probs, NULL, __pyx_n_s_fairseq_cps_dp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 100, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_calc_log_inclusion_probs, __pyx_t_2) < 0) __PYX_ERR(0, 100, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "fairseq/cps_dp.pyx":133
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def sample(np.ndarray[DTYPE_t, ndim=1] logp, np.ndarray[DTYPE_int_t, ndim=1] selected_inds, int k): # <<<<<<<<<<<<<<
* """
* This function picks a sample of size k from candidates
*/
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7fairseq_6cps_dp_5sample, NULL, __pyx_n_s_fairseq_cps_dp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_sample, __pyx_t_2) < 0) __PYX_ERR(0, 133, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "fairseq/cps_dp.pyx":178
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def sampford_sample(np.ndarray[DTYPE_t, ndim=1] logp, np.ndarray[DTYPE_int_t, ndim=1] selected_inds, int k): # <<<<<<<<<<<<<<
* cdef long n = len(logp)
* k = min(n, k)
*/
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7fairseq_6cps_dp_7sampford_sample, NULL, __pyx_n_s_fairseq_cps_dp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 178, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_sampford_sample, __pyx_t_2) < 0) __PYX_ERR(0, 178, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "fairseq/cps_dp.pyx":1
* import numpy as np # <<<<<<<<<<<<<<
* from libc.math cimport NAN
* from libc.math cimport isnan
*/
__pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "cfunc.to_py":64
*
* @cname("__Pyx_CFunc_DTYPE__t____DTYPE__t___to_py")
* cdef object __Pyx_CFunc_DTYPE__t____DTYPE__t___to_py(DTYPE_t (*f)(DTYPE_t) except *): # <<<<<<<<<<<<<<
* def wrap(DTYPE_t x):
* """wrap(x: 'DTYPE_t') -> 'DTYPE_t'"""
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init fairseq.cps_dp", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init fairseq.cps_dp");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule(modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, "RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
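/* Matching strategy above: each candidate keyword is first compared by
 * pointer (interned names make identity the common fast path), then by
 * length and PyUnicode_Compare only on a miss. A keyword that also filled a
 * positional slot lands in arg_passed_twice and raises the "multiple
 * values" TypeError; unknown keywords either flow into kwds2 (the **kwargs
 * dict) or raise.
 */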
/* ArgTypeTest */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
else if (exact) {
#if PY_MAJOR_VERSION == 2
if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(__Pyx_TypeCheck(obj, type))) return 1;
}
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
return 0;
}
/* IsLittleEndian */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
union {
uint32_t u32;
uint8_t u8[4];
} S;
S.u32 = 0x01020304;
return S.u8[0] == 4;
}
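/* Endianness probe explained: writing 0x01020304 into the union lets the
 * first byte reveal the byte order. On a little-endian machine the lowest
 * address holds the least-significant byte, so u8[0] == 4; on a big-endian
 * machine u8[0] would be 1. A standalone version of the same check:
 *
 *     union { uint32_t u32; uint8_t u8[4]; } s = { 0x01020304 };
 *     int little_endian = (s.u8[0] == 4);
 */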
/* BufferFormatCheck */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
PyErr_Format(PyExc_ValueError,\
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
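/* Parsing sketch: __Pyx_BufFmt_ParseNumber consumes a decimal run and leaves
 * *ts just past it, so for a buffer format such as "16d":
 *
 *     const char *p = "16d";
 *     int n = __Pyx_BufFmt_ParseNumber(&p);   // n == 16, *p == 'd'
 *
 * __Pyx_BufFmt_ExpectNumber adds the ValueError when no digit is present
 * (ParseNumber returns -1).
 */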
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case '?': return "'bool'";
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
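/* Alignment trick: each __Pyx_st_T struct places a char before a T, so
 * sizeof(__Pyx_st_T) - sizeof(T) is exactly the padding the compiler inserts
 * to align T after a one-byte member, i.e. T's alignment requirement on this
 * ABI. For example, on a typical LP64 target:
 *
 *     sizeof(__Pyx_st_double) == 16, sizeof(double) == 8
 *     => 'd' items align to 8 bytes
 */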
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct. This will probably be the same as above,
   but we don't have any guarantees.
 */
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
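/* Illustration only (hypothetical helper, not part of the generated module):
   the wrapper structs above turn "alignment of T" into a sizeof expression.
   In __Pyx_st_double the double member must start at an offset that is a
   multiple of its alignment, so the struct occupies that alignment plus
   sizeof(double); subtracting sizeof(double) recovers the alignment itself.
   The __Pyx_pad_* variants measure the trailing padding instead, which is
   usually - but not guaranteed to be - the same number. */
static CYTHON_UNUSED size_t __pyx_example_alignof_double(void) {
  return sizeof(__Pyx_st_double) - sizeof(double);  /* alignment of double */
}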
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case '?': case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
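/* __Pyx_BufFmt_ProcessTypeChunk consumes one pending "chunk" of the format
   string (enc_count repetitions of enc_type under enc_packmode) and walks the
   expected __Pyx_StructField tree, checking that size, type group, alignment
   and byte offset all match the C-level dtype. */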
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number, ndim;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ndim = ctx->head->field->type->ndim;
while (*ts && *ts != ')') {
switch (*ts) {
      case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue;  /* skip whitespace; advance, or the loop would never terminate */
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
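/* Example: for a struct member declared as "float m[2][3]" the exporter's
   format string contains "(2,3)f"; the loop above checks each extent against
   type->arraysize[i] and requires exactly ndim dimensions. */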
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
CYTHON_FALLTHROUGH;
case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) &&
(ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
CYTHON_FALLTHROUGH;
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
      while (*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
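/* Example: a description like "T{d:x:q:y:}" denotes a struct with a double
   field named x and a long long field named y; the ':name:' field names are
   skipped by the ':' case above, and '}' closes the nested scope. */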
/* BufferGetAndValidate */
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
if (unlikely(info->buf == NULL)) return;
if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
__Pyx_ReleaseBuffer(info);
}
static void __Pyx_ZeroBuffer(Py_buffer* buf) {
buf->buf = NULL;
buf->obj = NULL;
buf->strides = __Pyx_zeros;
buf->shape = __Pyx_zeros;
buf->suboffsets = __Pyx_minusones;
}
static int __Pyx__GetBufferAndValidate(
Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
buf->buf = NULL;
if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) {
__Pyx_ZeroBuffer(buf);
return -1;
}
if (unlikely(buf->ndim != nd)) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
nd, buf->ndim);
goto fail;
}
if (!cast) {
__Pyx_BufFmt_Context ctx;
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if (unlikely((size_t)buf->itemsize != dtype->size)) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
buf->itemsize, (buf->itemsize > 1) ? "s" : "",
dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
return 0;
fail:;
__Pyx_SafeReleaseBuffer(buf);
return -1;
}
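/* A minimal usage sketch (hypothetical caller; real call sites are
   generated): acquire a 1-D, non-cast buffer for `obj` and validate its
   format string against `dtype`, releasing the buffer again on any failure.
       __Pyx_BufFmt_StackElem stack[2];
       Py_buffer view;
       if (__Pyx__GetBufferAndValidate(&view, obj, dtype,
                                       PyBUF_FORMAT | PyBUF_STRIDES,
                                       1, 0, stack) == -1) return NULL;
*/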
/* PyDictVersioning */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
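/* CPython >= 3.6 bumps a dict's internal ma_version_tag (PEP 509) on every
   mutation, so two equal version tags prove the dict - and therefore any
   lookup result cached from it - is unchanged.  The helpers above snapshot
   the type dict and instance dict versions for that fast-path check. */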
/* GetModuleGlobalName */
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
} else if (unlikely(PyErr_Occurred())) {
return NULL;
}
#else
result = PyDict_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
#endif
#else
result = PyObject_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
PyErr_Clear();
#endif
return __Pyx_GetBuiltinName(name);
}
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = Py_TYPE(func)->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(__Pyx_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* BufferFallbackError */
static void __Pyx_RaiseBufferFallbackError(void) {
PyErr_SetString(PyExc_ValueError,
"Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!");
}
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
int flags = PyCFunction_GET_FLAGS(func);
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
/* _PyCFunction_FastCallDict() must not be called with an exception set,
because it may clear it (directly or indirectly) and so the
caller loses its exception */
assert(!PyErr_Occurred());
if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
} else {
return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
}
}
#endif
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
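    /* Decref'ing the frame can cause __del__ methods to get invoked, which
       can call back into Python.  The Python frame (f) is done, but the
       associated C stack is still in use, so the recursion depth is boosted
       for the duration of the Py_DECREF (as CPython's own fast-call path
       does). */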
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
        /* function called with no arguments, but all parameters have
           a default value: use default values as arguments. */
        args = &PyTuple_GET_ITEM(argdefs, 0);
        result = __Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif
#endif
/* PyObjectCall2Args */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
PyObject *args, *result = NULL;
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyFunction_FastCall(function, args, 2);
}
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyCFunction_FastCall(function, args, 2);
}
#endif
args = PyTuple_New(2);
if (unlikely(!args)) goto done;
Py_INCREF(arg1);
PyTuple_SET_ITEM(args, 0, arg1);
Py_INCREF(arg2);
PyTuple_SET_ITEM(args, 1, arg2);
Py_INCREF(function);
result = __Pyx_PyObject_Call(function, args, NULL);
Py_DECREF(args);
Py_DECREF(function);
done:
return result;
}
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
if (likely(PyCFunction_Check(func))) {
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (__Pyx_PyFastCFunction_Check(func)) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* PyObjectCallNoArg */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, NULL, 0);
}
#endif
#ifdef __Pyx_CyFunction_USED
if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func)))
#else
if (likely(PyCFunction_Check(func)))
#endif
{
if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
return __Pyx_PyObject_CallMethO(func, NULL);
}
}
return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
}
#endif
/* PyIntCompare */
static CYTHON_INLINE PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED long inplace) {
if (op1 == op2) {
Py_RETURN_TRUE;
}
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(op1))) {
const long b = intval;
long a = PyInt_AS_LONG(op1);
if (a == b) Py_RETURN_TRUE; else Py_RETURN_FALSE;
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(op1))) {
int unequal;
unsigned long uintval;
Py_ssize_t size = Py_SIZE(op1);
const digit* digits = ((PyLongObject*)op1)->ob_digit;
if (intval == 0) {
if (size == 0) Py_RETURN_TRUE; else Py_RETURN_FALSE;
} else if (intval < 0) {
if (size >= 0)
Py_RETURN_FALSE;
intval = -intval;
size = -size;
} else {
if (size <= 0)
Py_RETURN_FALSE;
}
uintval = (unsigned long) intval;
#if PyLong_SHIFT * 4 < SIZEOF_LONG*8
if (uintval >> (PyLong_SHIFT * 4)) {
unequal = (size != 5) || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
| (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[4] != ((uintval >> (4 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
} else
#endif
#if PyLong_SHIFT * 3 < SIZEOF_LONG*8
if (uintval >> (PyLong_SHIFT * 3)) {
unequal = (size != 4) || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
| (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
} else
#endif
#if PyLong_SHIFT * 2 < SIZEOF_LONG*8
if (uintval >> (PyLong_SHIFT * 2)) {
unequal = (size != 3) || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
| (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
} else
#endif
#if PyLong_SHIFT * 1 < SIZEOF_LONG*8
if (uintval >> (PyLong_SHIFT * 1)) {
unequal = (size != 2) || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
| (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
} else
#endif
unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK));
if (unequal == 0) Py_RETURN_TRUE; else Py_RETURN_FALSE;
}
#endif
if (PyFloat_CheckExact(op1)) {
const long b = intval;
double a = PyFloat_AS_DOUBLE(op1);
if ((double)a == (double)b) Py_RETURN_TRUE; else Py_RETURN_FALSE;
}
    return PyObject_RichCompare(op1, op2, Py_EQ);
}
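/* Worked example for the digit-wise branches above: with 30-bit digits
   (PyLong_SHIFT == 30), the value 1L << 40 is stored as two digits,
   digits[0] == 0 and digits[1] == 1 << 10, so only the size == 2 branch can
   report equality against the C constant - all without falling back to
   PyObject_RichCompare or allocating a temporary PyLong. */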
/* RaiseTooManyValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/* IterFinish */
static CYTHON_INLINE int __Pyx_IterFinish(void) {
#if CYTHON_FAST_THREAD_STATE
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* exc_type = tstate->curexc_type;
if (unlikely(exc_type)) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) {
PyObject *exc_value, *exc_tb;
exc_value = tstate->curexc_value;
exc_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
Py_DECREF(exc_type);
Py_XDECREF(exc_value);
Py_XDECREF(exc_tb);
return 0;
} else {
return -1;
}
}
return 0;
#else
if (unlikely(PyErr_Occurred())) {
if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) {
PyErr_Clear();
return 0;
} else {
return -1;
}
}
return 0;
#endif
}
/* UnpackItemEndCheck */
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) {
if (unlikely(retval)) {
Py_DECREF(retval);
__Pyx_RaiseTooManyValuesError(expected);
return -1;
} else {
return __Pyx_IterFinish();
}
return 0;
}
/* PyIntBinop */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
(void)inplace;
(void)zerodivision_check;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(op1))) {
const long b = intval;
long x;
long a = PyInt_AS_LONG(op1);
x = (long)((unsigned long)a + b);
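        /* Signed-overflow check: x is computed in unsigned arithmetic, so it
           is well defined; the sum is exact iff x has the same sign as at
           least one operand, i.e. (x^a) >= 0 || (x^b) >= 0. */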
if (likely((x^a) >= 0 || (x^b) >= 0))
return PyInt_FromLong(x);
return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(op1))) {
const long b = intval;
long a, x;
#ifdef HAVE_LONG_LONG
const PY_LONG_LONG llb = intval;
PY_LONG_LONG lla, llx;
#endif
const digit* digits = ((PyLongObject*)op1)->ob_digit;
const Py_ssize_t size = Py_SIZE(op1);
if (likely(__Pyx_sst_abs(size) <= 1)) {
a = likely(size) ? digits[0] : 0;
if (size == -1) a = -a;
} else {
switch (size) {
case -2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
}
x = a + b;
return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
long_long:
llx = lla + llb;
return PyLong_FromLongLong(llx);
#endif
}
#endif
if (PyFloat_CheckExact(op1)) {
const long b = intval;
double a = PyFloat_AS_DOUBLE(op1);
double result;
PyFPE_START_PROTECT("add", return NULL)
result = ((double)a) + (double)b;
PyFPE_END_PROTECT(result)
return PyFloat_FromDouble(result);
}
return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
/* GetItemInt */
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyList_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyTuple_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* ObjectGetItem */
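/* Mirrors CPython's PyObject_GetItem: prefer the mapping protocol
   (mp_subscript); otherwise coerce the key via __index__ and fall back to
   sequence indexing, turning an overflowing key into an IndexError. */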
#if CYTHON_USE_TYPE_SLOTS
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
PyObject *runerr;
Py_ssize_t key_value;
PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
if (unlikely(!(m && m->sq_item))) {
PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
return NULL;
}
key_value = __Pyx_PyIndex_AsSsize_t(index);
if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
}
if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
PyErr_Clear();
PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
}
return NULL;
}
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
if (likely(m && m->mp_subscript)) {
return m->mp_subscript(obj, key);
}
return __Pyx_PyObject_GetIndex(obj, key);
}
#endif
/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
_PyErr_StackItem *exc_info = tstate->exc_info;
while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
exc_info->previous_item != NULL)
{
exc_info = exc_info->previous_item;
}
return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
*type = exc_info->exc_type;
*value = exc_info->exc_value;
*tb = exc_info->exc_traceback;
#else
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
#endif
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = type;
exc_info->exc_value = value;
exc_info->exc_traceback = tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
if (unlikely(PyTuple_Check(err)))
return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetException */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if CYTHON_USE_EXC_INFO_STACK
{
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = local_type;
exc_info->exc_value = local_value;
exc_info->exc_traceback = local_tb;
}
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* FetchCommonType */
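/* Caches shared helper types (e.g. the CyFunction type below) as attributes
   of a dummy module named "_cython_" CYTHON_ABI, so every Cython module built
   with the same ABI reuses one type object instead of each defining its own
   incompatible copy. */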
static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) {
PyObject* fake_module;
PyTypeObject* cached_type = NULL;
fake_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI);
if (!fake_module) return NULL;
Py_INCREF(fake_module);
cached_type = (PyTypeObject*) PyObject_GetAttrString(fake_module, type->tp_name);
if (cached_type) {
if (!PyType_Check((PyObject*)cached_type)) {
PyErr_Format(PyExc_TypeError,
"Shared Cython type %.200s is not a type object",
type->tp_name);
goto bad;
}
if (cached_type->tp_basicsize != type->tp_basicsize) {
PyErr_Format(PyExc_TypeError,
"Shared Cython type %.200s has the wrong size, try recompiling",
type->tp_name);
goto bad;
}
} else {
if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad;
PyErr_Clear();
if (PyType_Ready(type) < 0) goto bad;
if (PyObject_SetAttrString(fake_module, type->tp_name, (PyObject*) type) < 0)
goto bad;
Py_INCREF(type);
cached_type = type;
}
done:
Py_DECREF(fake_module);
return cached_type;
bad:
Py_XDECREF(cached_type);
cached_type = NULL;
goto done;
}
/* CythonFunctionShared */
#include <structmember.h>
static PyObject *
__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure)
{
if (unlikely(op->func_doc == NULL)) {
if (op->func.m_ml->ml_doc) {
#if PY_MAJOR_VERSION >= 3
op->func_doc = PyUnicode_FromString(op->func.m_ml->ml_doc);
#else
op->func_doc = PyString_FromString(op->func.m_ml->ml_doc);
#endif
if (unlikely(op->func_doc == NULL))
return NULL;
} else {
Py_INCREF(Py_None);
return Py_None;
}
}
Py_INCREF(op->func_doc);
return op->func_doc;
}
static int
__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
{
PyObject *tmp = op->func_doc;
if (value == NULL) {
value = Py_None;
}
Py_INCREF(value);
op->func_doc = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
if (unlikely(op->func_name == NULL)) {
#if PY_MAJOR_VERSION >= 3
op->func_name = PyUnicode_InternFromString(op->func.m_ml->ml_name);
#else
op->func_name = PyString_InternFromString(op->func.m_ml->ml_name);
#endif
if (unlikely(op->func_name == NULL))
return NULL;
}
Py_INCREF(op->func_name);
return op->func_name;
}
static int
__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
{
PyObject *tmp;
#if PY_MAJOR_VERSION >= 3
if (unlikely(value == NULL || !PyUnicode_Check(value)))
#else
if (unlikely(value == NULL || !PyString_Check(value)))
#endif
{
PyErr_SetString(PyExc_TypeError,
"__name__ must be set to a string object");
return -1;
}
tmp = op->func_name;
Py_INCREF(value);
op->func_name = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
Py_INCREF(op->func_qualname);
return op->func_qualname;
}
static int
__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
{
PyObject *tmp;
#if PY_MAJOR_VERSION >= 3
if (unlikely(value == NULL || !PyUnicode_Check(value)))
#else
if (unlikely(value == NULL || !PyString_Check(value)))
#endif
{
PyErr_SetString(PyExc_TypeError,
"__qualname__ must be set to a string object");
return -1;
}
tmp = op->func_qualname;
Py_INCREF(value);
op->func_qualname = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_self(__pyx_CyFunctionObject *m, CYTHON_UNUSED void *closure)
{
PyObject *self;
self = m->func_closure;
if (self == NULL)
self = Py_None;
Py_INCREF(self);
return self;
}
static PyObject *
__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
if (unlikely(op->func_dict == NULL)) {
op->func_dict = PyDict_New();
if (unlikely(op->func_dict == NULL))
return NULL;
}
Py_INCREF(op->func_dict);
return op->func_dict;
}
static int
__Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
{
PyObject *tmp;
if (unlikely(value == NULL)) {
PyErr_SetString(PyExc_TypeError,
"function's dictionary may not be deleted");
return -1;
}
if (unlikely(!PyDict_Check(value))) {
PyErr_SetString(PyExc_TypeError,
"setting function's dictionary to a non-dict");
return -1;
}
tmp = op->func_dict;
Py_INCREF(value);
op->func_dict = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
Py_INCREF(op->func_globals);
return op->func_globals;
}
static PyObject *
__Pyx_CyFunction_get_closure(CYTHON_UNUSED __pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
Py_INCREF(Py_None);
return Py_None;
}
static PyObject *
__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
PyObject* result = (op->func_code) ? op->func_code : Py_None;
Py_INCREF(result);
return result;
}
static int
__Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) {
int result = 0;
PyObject *res = op->defaults_getter((PyObject *) op);
if (unlikely(!res))
return -1;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
op->defaults_tuple = PyTuple_GET_ITEM(res, 0);
Py_INCREF(op->defaults_tuple);
op->defaults_kwdict = PyTuple_GET_ITEM(res, 1);
Py_INCREF(op->defaults_kwdict);
#else
op->defaults_tuple = PySequence_ITEM(res, 0);
if (unlikely(!op->defaults_tuple)) result = -1;
else {
op->defaults_kwdict = PySequence_ITEM(res, 1);
if (unlikely(!op->defaults_kwdict)) result = -1;
}
#endif
Py_DECREF(res);
return result;
}
static int
__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) {
PyObject* tmp;
if (!value) {
value = Py_None;
} else if (value != Py_None && !PyTuple_Check(value)) {
PyErr_SetString(PyExc_TypeError,
"__defaults__ must be set to a tuple object");
return -1;
}
Py_INCREF(value);
tmp = op->defaults_tuple;
op->defaults_tuple = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) {
PyObject* result = op->defaults_tuple;
if (unlikely(!result)) {
if (op->defaults_getter) {
if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL;
result = op->defaults_tuple;
} else {
result = Py_None;
}
}
Py_INCREF(result);
return result;
}
static int
__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) {
PyObject* tmp;
if (!value) {
value = Py_None;
} else if (value != Py_None && !PyDict_Check(value)) {
PyErr_SetString(PyExc_TypeError,
"__kwdefaults__ must be set to a dict object");
return -1;
}
Py_INCREF(value);
tmp = op->defaults_kwdict;
op->defaults_kwdict = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) {
PyObject* result = op->defaults_kwdict;
if (unlikely(!result)) {
if (op->defaults_getter) {
if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL;
result = op->defaults_kwdict;
} else {
result = Py_None;
}
}
Py_INCREF(result);
return result;
}
static int
__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) {
PyObject* tmp;
if (!value || value == Py_None) {
value = NULL;
} else if (!PyDict_Check(value)) {
PyErr_SetString(PyExc_TypeError,
"__annotations__ must be set to a dict object");
return -1;
}
Py_XINCREF(value);
tmp = op->func_annotations;
op->func_annotations = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) {
PyObject* result = op->func_annotations;
if (unlikely(!result)) {
result = PyDict_New();
if (unlikely(!result)) return NULL;
op->func_annotations = result;
}
Py_INCREF(result);
return result;
}
static PyGetSetDef __pyx_CyFunction_getsets[] = {
{(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0},
{(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0},
{(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0},
{(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0},
{(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0},
{(char *) "__self__", (getter)__Pyx_CyFunction_get_self, 0, 0, 0},
{(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0},
{(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0},
{(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0},
{(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0},
{(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0},
{(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0},
{(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0},
{(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0},
{(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0},
{(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0},
{(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0},
{(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0},
{0, 0, 0, 0, 0}
};
static PyMemberDef __pyx_CyFunction_members[] = {
{(char *) "__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), PY_WRITE_RESTRICTED, 0},
{0, 0, 0, 0, 0}
};
static PyObject *
__Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, CYTHON_UNUSED PyObject *args)
{
#if PY_MAJOR_VERSION >= 3
Py_INCREF(m->func_qualname);
return m->func_qualname;
#else
return PyString_FromString(m->func.m_ml->ml_name);
#endif
}
static PyMethodDef __pyx_CyFunction_methods[] = {
{"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0},
{0, 0, 0, 0}
};
#if PY_VERSION_HEX < 0x030500A0
#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist)
#else
#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func.m_weakreflist)
#endif
static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname,
PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) {
if (unlikely(op == NULL))
return NULL;
op->flags = flags;
__Pyx_CyFunction_weakreflist(op) = NULL;
op->func.m_ml = ml;
op->func.m_self = (PyObject *) op;
Py_XINCREF(closure);
op->func_closure = closure;
Py_XINCREF(module);
op->func.m_module = module;
op->func_dict = NULL;
op->func_name = NULL;
Py_INCREF(qualname);
op->func_qualname = qualname;
op->func_doc = NULL;
op->func_classobj = NULL;
op->func_globals = globals;
Py_INCREF(op->func_globals);
Py_XINCREF(code);
op->func_code = code;
op->defaults_pyobjects = 0;
op->defaults_size = 0;
op->defaults = NULL;
op->defaults_tuple = NULL;
op->defaults_kwdict = NULL;
op->defaults_getter = NULL;
op->func_annotations = NULL;
return (PyObject *) op;
}
static int
__Pyx_CyFunction_clear(__pyx_CyFunctionObject *m)
{
Py_CLEAR(m->func_closure);
Py_CLEAR(m->func.m_module);
Py_CLEAR(m->func_dict);
Py_CLEAR(m->func_name);
Py_CLEAR(m->func_qualname);
Py_CLEAR(m->func_doc);
Py_CLEAR(m->func_globals);
Py_CLEAR(m->func_code);
Py_CLEAR(m->func_classobj);
Py_CLEAR(m->defaults_tuple);
Py_CLEAR(m->defaults_kwdict);
Py_CLEAR(m->func_annotations);
if (m->defaults) {
PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
int i;
for (i = 0; i < m->defaults_pyobjects; i++)
Py_XDECREF(pydefaults[i]);
PyObject_Free(m->defaults);
m->defaults = NULL;
}
return 0;
}
static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m)
{
if (__Pyx_CyFunction_weakreflist(m) != NULL)
PyObject_ClearWeakRefs((PyObject *) m);
__Pyx_CyFunction_clear(m);
PyObject_GC_Del(m);
}
static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m)
{
PyObject_GC_UnTrack(m);
__Pyx__CyFunction_dealloc(m);
}
static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg)
{
Py_VISIT(m->func_closure);
Py_VISIT(m->func.m_module);
Py_VISIT(m->func_dict);
Py_VISIT(m->func_name);
Py_VISIT(m->func_qualname);
Py_VISIT(m->func_doc);
Py_VISIT(m->func_globals);
Py_VISIT(m->func_code);
Py_VISIT(m->func_classobj);
Py_VISIT(m->defaults_tuple);
Py_VISIT(m->defaults_kwdict);
if (m->defaults) {
PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
int i;
for (i = 0; i < m->defaults_pyobjects; i++)
Py_VISIT(pydefaults[i]);
}
return 0;
}
static PyObject *__Pyx_CyFunction_descr_get(PyObject *func, PyObject *obj, PyObject *type)
{
#if PY_MAJOR_VERSION < 3
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
if (m->flags & __Pyx_CYFUNCTION_STATICMETHOD) {
Py_INCREF(func);
return func;
}
if (m->flags & __Pyx_CYFUNCTION_CLASSMETHOD) {
if (type == NULL)
type = (PyObject *)(Py_TYPE(obj));
return __Pyx_PyMethod_New(func, type, (PyObject *)(Py_TYPE(type)));
}
if (obj == Py_None)
obj = NULL;
#endif
return __Pyx_PyMethod_New(func, obj, type);
}
static PyObject*
__Pyx_CyFunction_repr(__pyx_CyFunctionObject *op)
{
#if PY_MAJOR_VERSION >= 3
return PyUnicode_FromFormat("<cyfunction %U at %p>",
op->func_qualname, (void *)op);
#else
return PyString_FromFormat("<cyfunction %s at %p>",
PyString_AsString(op->func_qualname), (void *)op);
#endif
}
static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) {
PyCFunctionObject* f = (PyCFunctionObject*)func;
PyCFunction meth = f->m_ml->ml_meth;
Py_ssize_t size;
switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) {
case METH_VARARGS:
if (likely(kw == NULL || PyDict_Size(kw) == 0))
return (*meth)(self, arg);
break;
case METH_VARARGS | METH_KEYWORDS:
return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw);
case METH_NOARGS:
if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
size = PyTuple_GET_SIZE(arg);
if (likely(size == 0))
return (*meth)(self, NULL);
PyErr_Format(PyExc_TypeError,
"%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)",
f->m_ml->ml_name, size);
return NULL;
}
break;
case METH_O:
if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
size = PyTuple_GET_SIZE(arg);
if (likely(size == 1)) {
PyObject *result, *arg0;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
arg0 = PyTuple_GET_ITEM(arg, 0);
#else
arg0 = PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL;
#endif
result = (*meth)(self, arg0);
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
Py_DECREF(arg0);
#endif
return result;
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)",
f->m_ml->ml_name, size);
return NULL;
}
break;
default:
PyErr_SetString(PyExc_SystemError, "Bad call flags in "
"__Pyx_CyFunction_Call. METH_OLDARGS is no "
"longer supported!");
return NULL;
}
PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments",
f->m_ml->ml_name);
return NULL;
}
static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) {
return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw);
}
static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) {
PyObject *result;
__pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func;
if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) {
Py_ssize_t argc;
PyObject *new_args;
PyObject *self;
argc = PyTuple_GET_SIZE(args);
new_args = PyTuple_GetSlice(args, 1, argc);
if (unlikely(!new_args))
return NULL;
self = PyTuple_GetItem(args, 0);
if (unlikely(!self)) {
Py_DECREF(new_args);
return NULL;
}
result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw);
Py_DECREF(new_args);
} else {
result = __Pyx_CyFunction_Call(func, args, kw);
}
return result;
}
static PyTypeObject __pyx_CyFunctionType_type = {
PyVarObject_HEAD_INIT(0, 0)
"cython_function_or_method",            /* tp_name */
sizeof(__pyx_CyFunctionObject),         /* tp_basicsize */
0,                                      /* tp_itemsize */
(destructor) __Pyx_CyFunction_dealloc,  /* tp_dealloc */
0,                                      /* tp_print / tp_vectorcall_offset */
0,                                      /* tp_getattr */
0,                                      /* tp_setattr */
#if PY_MAJOR_VERSION < 3
0,                                      /* tp_compare */
#else
0,                                      /* tp_as_async */
#endif
(reprfunc) __Pyx_CyFunction_repr,       /* tp_repr */
0,                                      /* tp_as_number */
0,                                      /* tp_as_sequence */
0,                                      /* tp_as_mapping */
0,                                      /* tp_hash */
__Pyx_CyFunction_CallAsMethod,          /* tp_call */
0,                                      /* tp_str */
0,                                      /* tp_getattro */
0,                                      /* tp_setattro */
0,                                      /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
0,                                      /* tp_doc */
(traverseproc) __Pyx_CyFunction_traverse, /* tp_traverse */
(inquiry) __Pyx_CyFunction_clear,       /* tp_clear */
0,                                      /* tp_richcompare */
#if PY_VERSION_HEX < 0x030500A0
offsetof(__pyx_CyFunctionObject, func_weakreflist), /* tp_weaklistoffset */
#else
offsetof(PyCFunctionObject, m_weakreflist),         /* tp_weaklistoffset */
#endif
0,                                      /* tp_iter */
0,                                      /* tp_iternext */
__pyx_CyFunction_methods,               /* tp_methods */
__pyx_CyFunction_members,               /* tp_members */
__pyx_CyFunction_getsets,               /* tp_getset */
0,                                      /* tp_base */
0,                                      /* tp_dict */
__Pyx_CyFunction_descr_get,             /* tp_descr_get */
0,                                      /* tp_descr_set */
offsetof(__pyx_CyFunctionObject, func_dict), /* tp_dictoffset */
0,                                      /* tp_init */
0,                                      /* tp_alloc */
0,                                      /* tp_new */
0,                                      /* tp_free */
0,                                      /* tp_is_gc */
0,                                      /* tp_bases */
0,                                      /* tp_mro */
0,                                      /* tp_cache */
0,                                      /* tp_subclasses */
0,                                      /* tp_weaklist */
0,                                      /* tp_del */
0,                                      /* tp_version_tag */
#if PY_VERSION_HEX >= 0x030400a1
0,                                      /* tp_finalize */
#endif
#if PY_VERSION_HEX >= 0x030800b1
0,                                      /* tp_vectorcall */
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0,                                      /* tp_print (3.8 only) */
#endif
};
static int __pyx_CyFunction_init(void) {
__pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type);
if (unlikely(__pyx_CyFunctionType == NULL)) {
return -1;
}
return 0;
}
static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) {
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
m->defaults = PyObject_Malloc(size);
if (unlikely(!m->defaults))
return PyErr_NoMemory();
memset(m->defaults, 0, size);
m->defaults_pyobjects = pyobjects;
m->defaults_size = size;
return m->defaults;
}
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) {
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
m->defaults_tuple = tuple;
Py_INCREF(tuple);
}
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) {
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
m->defaults_kwdict = dict;
Py_INCREF(dict);
}
static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) {
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
m->func_annotations = dict;
Py_INCREF(dict);
}
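/* Editor's note: the three setters above take ownership of fully built
   Python objects, while __Pyx_CyFunction_InitDefaults allocates a raw,
   zeroed blob for C-typed default values. The first `pyobjects` slots of
   that blob must be PyObject* so that __Pyx_CyFunction_traverse (top of
   this section) can visit them for GC. A minimal sketch, with a
   hypothetical defaults layout: */
typedef struct {
    PyObject *fmt;  /* PyObject* defaults come first: covered by defaults_pyobjects */
    int n;          /* plain C defaults follow */
} __pyx_demo_defaults;
static int __pyx_demo_init_defaults(PyObject *cyfunc) {
    __pyx_demo_defaults *d = (__pyx_demo_defaults *)
        __Pyx_CyFunction_InitDefaults(cyfunc, sizeof(__pyx_demo_defaults), 1);
    if (!d) return -1;
    d->n = 3;
    d->fmt = PyUnicode_FromString("x");
    return d->fmt ? 0 : -1;
}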
/* CythonFunction */
static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname,
PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) {
PyObject *op = __Pyx_CyFunction_Init(
PyObject_GC_New(__pyx_CyFunctionObject, __pyx_CyFunctionType),
ml, flags, qualname, closure, module, globals, code
);
if (likely(op)) {
PyObject_GC_Track(op);
}
return op;
}
/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
"'%.50s' object has no attribute '%U'",
tp->tp_name, attr_name);
#else
"'%.50s' object has no attribute '%.400s'",
tp->tp_name, PyString_AS_STRING(attr_name));
#endif
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
PyObject *descr;
PyTypeObject *tp = Py_TYPE(obj);
if (unlikely(!PyString_Check(attr_name))) {
return PyObject_GenericGetAttr(obj, attr_name);
}
assert(!tp->tp_dictoffset);
descr = _PyType_Lookup(tp, attr_name);
if (unlikely(!descr)) {
return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
}
Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
{
descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
if (unlikely(f)) {
PyObject *res = f(descr, obj, (PyObject *)tp);
Py_DECREF(descr);
return res;
}
}
return descr;
}
#endif
/* TypeImport */
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name,
size_t size, enum __Pyx_ImportType_CheckSize check_size)
{
PyObject *result = 0;
char warning[200];
Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
PyObject *py_basicsize;
#endif
result = PyObject_GetAttrString(module, class_name);
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%.200s.%.200s is not a type object",
module_name, class_name);
goto bad;
}
#ifndef Py_LIMITED_API
basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
if (!py_basicsize)
goto bad;
basicsize = PyLong_AsSsize_t(py_basicsize);
Py_DECREF(py_basicsize);
py_basicsize = 0;
if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
goto bad;
#endif
if ((size_t)basicsize < size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
}
return (PyTypeObject *)result;
bad:
Py_XDECREF(result);
return NULL;
}
#endif
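/* Editor's note: a typical call site for __Pyx_ImportType is checking an
   extension type's struct layout at import time; a sketch (the numpy names
   are illustrative and assume the numpy C headers are available):

   PyObject *np = PyImport_ImportModule("numpy");
   PyTypeObject *tp = np ? __Pyx_ImportType(np, "numpy", "ndarray",
       sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Warn) : NULL;

   CheckSize_Error rejects any size mismatch; CheckSize_Warn accepts a
   larger runtime struct (a newer library adding fields) with a warning,
   since the C header's view of the object is still a valid prefix. */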
/* Import */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* ImportFrom */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
"cannot import name %.230s", PyString_AS_STRING(name));
#else
"cannot import name %S", name);
#endif
}
return value;
}
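/* Editor's note: together these two helpers implement the import statement.
   A sketch of the generated call sequence for "from os import path" (the
   __pyx_n_s_* string constants are illustrative):

   module = __Pyx_Import(__pyx_n_s_os, from_list, 0);   import os (absolute)
   value  = __Pyx_ImportFrom(module, __pyx_n_s_path);   os.path, or ImportError

   On Python 3, level == -1 first attempts an implicit relative import
   (level 1) when the module name is dotted, then falls back to an absolute
   import at level 0. */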
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!__pyx_cython_runtime)) {
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
use_cline, *cython_runtime_dict,
__Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
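/* Editor's note: the cache is a sorted array keyed by line number, so lookup
   and insert share the bisection above. Worked example with cached lines
   {10, 20, 30}: bisect(20) -> 1 (exact hit), bisect(25) -> 2 (insertion
   point), bisect(35) -> 3 (== count, append). C-source lines are stored
   negated by the AddTraceback code below, so they cannot collide with
   Python line numbers. */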
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
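/* Editor's note: this synthesizes a throwaway PyCodeObject/PyFrameObject so
   that C-level failures appear as ordinary entries in the Python traceback.
   A sketch of the call emitted at error-handling labels in Cython modules
   (function name illustrative; __pyx_clineno/__pyx_lineno/__pyx_filename
   are the module-level position globals):

   __Pyx_AddTraceback("module.function", __pyx_clineno, __pyx_lineno, __pyx_filename);
*/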
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if ((0)) {}
view->obj = NULL;
Py_DECREF(obj);
}
#endif
/* CIntFromPyVerify */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
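/* Editor's note: the verify macro narrows a value fetched at a wider C type.
   A sketch of what __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
   expands to on a platform where sizeof(int) < sizeof(long):

   long value = PyInt_AS_LONG(x);
   if (value != (long)(int)value)   round-trip through int changed the value
       goto raise_overflow;         (raise_neg_overflow if negative + unsigned)
   return (int) value;

   The _EXC variant additionally checks for a pending Python exception when
   the fetch reports -1. */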
/* Declarations */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return ::std::complex< float >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return x + y*(__pyx_t_float_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
__pyx_t_float_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if 1
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabsf(b.real) >= fabsf(b.imag)) {
if (b.real == 0 && b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
float r = b.imag / b.real;
float s = (float)(1.0) / (b.real + b.imag * r);
return __pyx_t_float_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
float r = b.real / b.imag;
float s = (float)(1.0) / (b.imag + b.real * r);
return __pyx_t_float_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
float denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_float_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrtf(z.real*z.real + z.imag*z.imag);
#else
return hypotf(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
float r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
float denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
return __Pyx_c_prod_float(a, a);
case 3:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, a);
case 4:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = powf(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
r = -a.real;
theta = atan2f(0.0, -1.0);
}
} else {
r = __Pyx_c_abs_float(a);
theta = atan2f(a.imag, a.real);
}
lnr = logf(r);
z_r = expf(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cosf(z_theta);
z.imag = z_r * sinf(z_theta);
return z;
}
#endif
#endif
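/* Editor's note: __Pyx_c_quot_float above uses Smith's algorithm: when
   |b.real| >= |b.imag|, let r = b.imag/b.real and s = 1/(b.real + b.imag*r);
   then a/b = ((a.real + a.imag*r)*s, (a.imag - a.real*r)*s). This equals
   (a * conj(b)) / |b|^2 algebraically but never forms |b|^2 directly,
   avoiding spurious overflow/underflow. __Pyx_c_pow_float unrolls small
   integer exponents via repeated multiplication and otherwise works in
   polar form, z = r*e^(i*theta). */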
/* Declarations */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return ::std::complex< double >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return x + y*(__pyx_t_double_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
__pyx_t_double_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if 1
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabs(b.real) >= fabs(b.imag)) {
if (b.real == 0 && b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
double r = b.imag / b.real;
double s = (double)(1.0) / (b.real + b.imag * r);
return __pyx_t_double_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
double r = b.real / b.imag;
double s = (double)(1.0) / (b.imag + b.real * r);
return __pyx_t_double_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
double denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_double_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt(z.real*z.real + z.imag*z.imag);
#else
return hypot(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
double denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
return __Pyx_c_prod_double(a, a);
case 3:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, a);
case 4:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = pow(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
r = -a.real;
theta = atan2(0.0, -1.0);
}
} else {
r = __Pyx_c_abs_double(a);
theta = atan2(a.imag, a.real);
}
lnr = log(r);
z_r = exp(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cos(z_theta);
z.imag = z_r * sin(z_theta);
return z;
}
#endif
#endif
/* CIntFromPy */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
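/* Editor's note: the digit-switch fast path above reads CPython's PyLong
   representation directly: ob_digit is a little-endian array of base
   2^PyLong_SHIFT digits (PyLong_SHIFT is 30 on most 64-bit builds) and the
   sign lives in Py_SIZE. E.g. a positive two-digit value is
   digits[0] | ((unsigned long)digits[1] << PyLong_SHIFT). Values wider than
   the unrolled cases fall through to PyLong_AsLong/PyLong_AsLongLong, with
   _PyLong_AsByteArray as the last resort for exotic type widths. */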
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_npy_int64(npy_int64 value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const npy_int64 neg_one = (npy_int64) -1, const_zero = (npy_int64) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(npy_int64) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(npy_int64) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(npy_int64) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(npy_int64) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(npy_int64) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(npy_int64),
little, !is_unsigned);
}
}
/* FastTypeChecks */
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
} else {
}
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
} else {
}
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
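/* Editor's note: on CPython these helpers shortcut PyErr_GivenExceptionMatches
   by (1) identity comparison and then (2) a direct walk of tp_mro/tp_base via
   __Pyx_IsSubtype, avoiding the generic subclass protocol. A sketch of the
   usual call shape for an `except ImportError:` style handler (note the
   helpers assume a non-NULL exception):

   PyObject *exc = PyErr_Occurred();            borrowed; NULL if no error
   if (exc && __Pyx_PyErr_GivenExceptionMatches(exc, PyExc_ImportError)) {
       PyErr_Clear();
   }
*/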
/* CheckBinaryVersion */
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
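/* Editor's note: only the "X.Y" prefix is compared, character by character
   (index 0 for the major digit, index 2 for the minor), and a mismatch is
   reported as a warning rather than an error. E.g. a module built under 3.8
   but imported under 3.9 compares "3.8" vs "3.9" and warns. The 4-byte
   buffers assume single-digit version components. */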
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
int retval;
if (unlikely(!x)) return -1;
retval = __Pyx_PyObject_IsTrue(x);
Py_DECREF(x);
return retval;
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
GB_binop__rminus_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_int32)
// A*D function (colscale): GB (_AxD__rminus_int32)
// D*A function (rowscale): GB (_DxB__rminus_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_int32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_int32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_int32)
// C=scalar+B GB (_bind1st__rminus_int32)
// C=scalar+B' GB (_bind1st_tran__rminus_int32)
// C=A+scalar GB (_bind2nd__rminus_int32)
// C=A'+scalar GB (_bind2nd_tran__rminus_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y - x) ;
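// Editor's note: RMINUS is "reverse minus": GB_BINOP assigns z = y - x, the
// flipped form of MINUS, which is why GB_BINOP_FLIP below is 0. A minimal
// sketch of how the included templates apply these macros over a dense
// output (the real templates also handle masks, tasks, iso values, and
// sparsity formats; this loop is illustrative only):
//
//      for (int64_t p = 0 ; p < cnz ; p++)
//      {
//          GB_GETA (aij, Ax, p, false) ;           // aij = Ax [p]
//          GB_GETB (bij, Bx, p, false) ;           // bij = Bx [p]
//          GB_BINOP (GB_CX (p), aij, bij, i, j) ;  // Cx [p] = bij - aij
//      }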
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_INT32 || GxB_NO_RMINUS_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rminus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__rminus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rminus_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rminus_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rminus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rminus_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rminus_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
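// (for eWiseUnion, alpha_scalar and beta_scalar supply the operand values
// used where A or B has no entry, instead of dropping those results)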
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rminus_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rminus_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
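// (rminus is the latter case: applying rminus with swapped operands yields
// plain minus, so this kernel only ever compiles with GB_FLIPPED 0)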
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rminus_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rminus_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rminus_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (bij - x) ;
}
return (GrB_SUCCESS) ;
#endif
}
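// usage sketch (hypothetical values, B dense and non-iso): binding x = 10 as
// the first operand of rminus over Bx = {1,2,3} (Bb == NULL, bnz == 3) gives
// Cx [p] = Bx [p] - 10, i.e. Cx = {-9,-8,-7}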
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rminus_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (y - aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
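// usage sketch (hypothetical values): binding y = 10 as the second operand
// over Ax = {1,2,3} gives Cx [p] = 10 - Ax [p], i.e. Cx = {9,8,7}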
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - x) ; \
}
GrB_Info GB (_bind1st_tran__rminus_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y - aij) ; \
}
GrB_Info GB (_bind2nd_tran__rminus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_split_sparse.c | //------------------------------------------------------------------------------
// GB_split_sparse: split a sparse/hypersparse matrix into tiles
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#define GB_FREE_WORKSPACE \
GB_WERK_POP (C_ek_slicing, int64_t) ; \
GB_FREE_WORK (&Wp, Wp_size) ;
#define GB_FREE_ALL \
GB_FREE_WORKSPACE ; \
GB_Matrix_free (&C) ;
#include "GB_split.h"
GrB_Info GB_split_sparse // split a sparse matrix
(
GrB_Matrix *Tiles, // 2D row-major array of size m-by-n
const GrB_Index m,
const GrB_Index n,
const int64_t *restrict Tile_rows, // size m+1
const int64_t *restrict Tile_cols, // size n+1
const GrB_Matrix A, // input matrix
GB_Context Context
)
{
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GrB_Info info ;
int A_sparsity = GB_sparsity (A) ;
bool A_is_hyper = (A_sparsity == GxB_HYPERSPARSE) ;
ASSERT (A_is_hyper || A_sparsity == GxB_SPARSE) ;
GrB_Matrix C = NULL ;
GB_WERK_DECLARE (C_ek_slicing, int64_t) ;
ASSERT_MATRIX_OK (A, "A sparse for split", GB0) ;
int sparsity_control = A->sparsity_control ;
float hyper_switch = A->hyper_switch ;
bool csc = A->is_csc ;
GrB_Type atype = A->type ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
size_t asize = atype->size ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int64_t nouter = csc ? n : m ;
int64_t ninner = csc ? m : n ;
const int64_t *Tile_vdim = csc ? Tile_cols : Tile_rows ;
const int64_t *Tile_vlen = csc ? Tile_rows : Tile_cols ;
int64_t anvec = A->nvec ;
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ai = A->i ;
const bool A_iso = A->iso ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
size_t Wp_size = 0 ;
int64_t *restrict Wp = NULL ;
Wp = GB_MALLOC_WORK (anvec, int64_t, &Wp_size) ;
if (Wp == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
GB_memcpy (Wp, Ap, anvec * sizeof (int64_t), nthreads_max) ;
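// Wp starts as a copy of Ap; Wp [k] always points at the first entry of
// vector k not yet assigned to a tile, and is advanced after each tile
// is extracted in the inner loop below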
//--------------------------------------------------------------------------
// split A into tiles
//--------------------------------------------------------------------------
int64_t akend = 0 ;
for (int64_t outer = 0 ; outer < nouter ; outer++)
{
//----------------------------------------------------------------------
// find the starting and ending vector of these tiles
//----------------------------------------------------------------------
// The tile appears in vectors avstart:avend-1 of A, and indices
// aistart:aiend-1.
const int64_t avstart = Tile_vdim [outer] ;
const int64_t avend = Tile_vdim [outer+1] ;
int64_t akstart = akend ;
if (A_is_hyper)
{
// A is hypersparse: look for vector avend in the A->h hyper list.
// The vectors to handle for this outer loop are in
// Ah [akstart:akend-1].
akend = akstart ;
int64_t pright = anvec - 1 ;
bool found ;
GB_SPLIT_BINARY_SEARCH (avend, Ah, akend, pright, found) ;
ASSERT (GB_IMPLIES (akstart <= akend-1, Ah [akend-1] < avend)) ;
}
else
{
// A is sparse; the vectors to handle are akstart:akend-1
akend = avend ;
}
// # of vectors in all tiles in this outer loop
int64_t cnvec = akend - akstart ;
int nth = GB_nthreads (cnvec, chunk, nthreads_max) ;
//----------------------------------------------------------------------
// create all tiles for vectors akstart:akend-1 in A
//----------------------------------------------------------------------
for (int64_t inner = 0 ; inner < ninner ; inner++)
{
//------------------------------------------------------------------
// allocate C, C->p, and C->h for this tile
//------------------------------------------------------------------
const int64_t aistart = Tile_vlen [inner] ;
const int64_t aiend = Tile_vlen [inner+1] ;
const int64_t cvdim = avend - avstart ;
const int64_t cvlen = aiend - aistart ;
C = NULL ;
GB_OK (GB_new (&C, // new header
atype, cvlen, cvdim, GB_Ap_malloc, csc, A_sparsity,
hyper_switch, cnvec, Context)) ;
C->sparsity_control = sparsity_control ;
C->hyper_switch = hyper_switch ;
C->nvec = cnvec ;
int64_t *restrict Cp = C->p ;
int64_t *restrict Ch = C->h ;
//------------------------------------------------------------------
// determine the boundaries of this tile
//------------------------------------------------------------------
int64_t k ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (k = akstart ; k < akend ; k++)
{
int64_t pA = Wp [k] ;
const int64_t pA_end = Ap [k+1] ;
const int64_t aknz = pA_end - pA ;
if (aknz == 0 || Ai [pA] >= aiend)
{
// this vector of C is empty
}
else if (aknz > 256)
{
// use binary search to find aiend
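// (a binary search is O(log aknz) per vector; 256 is the crossover
// point below which the linear scan in the else-branch is cheaper)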
bool found ;
int64_t pright = pA_end - 1 ;
GB_SPLIT_BINARY_SEARCH (aiend, Ai, pA, pright, found) ;
#ifdef GB_DEBUG
// check the results with a linear search
int64_t p2 = Wp [k] ;
for ( ; p2 < Ap [k+1] ; p2++)
{
if (Ai [p2] >= aiend) break ;
}
ASSERT (pA == p2) ;
#endif
}
else
{
// use a linear-time search to find aiend
for ( ; pA < pA_end ; pA++)
{
if (Ai [pA] >= aiend) break ;
}
#ifdef GB_DEBUG
// check the results with a binary search
bool found ;
int64_t p2 = Wp [k] ;
int64_t p2_end = Ap [k+1] - 1 ;
GB_SPLIT_BINARY_SEARCH (aiend, Ai, p2, p2_end, found) ;
ASSERT (pA == p2) ;
#endif
}
Cp [k-akstart] = (pA - Wp [k]) ; // # of entries in this vector
if (A_is_hyper)
{
Ch [k-akstart] = Ah [k] - avstart ;
}
}
GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nth, Context) ;
int64_t cnz = Cp [cnvec] ;
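// GB_cumsum replaces the per-vector counts in Cp with cumulative offsets,
// so Cp [cnvec] is now cnz, the total number of entries in this tile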
//------------------------------------------------------------------
// allocate C->i and C->x for this tile
//------------------------------------------------------------------
// set C->iso = A_iso OK
GB_OK (GB_bix_alloc (C, cnz, GxB_SPARSE, false, true, A_iso,
Context)) ;
int64_t *restrict Ci = C->i ;
C->magic = GB_MAGIC ; // for GB_nnz_held(C), to slice C
//------------------------------------------------------------------
// copy the tile from A into C
//------------------------------------------------------------------
int C_ntasks, C_nthreads ;
GB_SLICE_MATRIX (C, 8, chunk) ;
bool done = false ;
if (A_iso)
{
//--------------------------------------------------------------
// split an iso matrix A into an iso tile C
//--------------------------------------------------------------
// A is iso and so is C; copy the iso entry
GBURBLE ("(iso sparse split) ") ;
memcpy (C->x, A->x, asize) ;
#define GB_ISO_SPLIT
#define GB_COPY(pC,pA) ;
#include "GB_split_sparse_template.c"
}
else
{
//--------------------------------------------------------------
// split a non-iso matrix A into a non-iso tile C
//--------------------------------------------------------------
#ifndef GBCUDA_DEV
// no typecasting needed
switch (asize)
{
#undef GB_COPY
#define GB_COPY(pC,pA) Cx [pC] = Ax [pA] ;
case GB_1BYTE : // uint8, int8, bool, or 1-byte user-defined
#define GB_CTYPE uint8_t
#include "GB_split_sparse_template.c"
break ;
case GB_2BYTE : // uint16, int16, or 2-byte user-defined
#define GB_CTYPE uint16_t
#include "GB_split_sparse_template.c"
break ;
case GB_4BYTE : // uint32, int32, float, or 4-byte user
#define GB_CTYPE uint32_t
#include "GB_split_sparse_template.c"
break ;
case GB_8BYTE : // uint64, int64, double, float complex,
// or 8-byte user defined
#define GB_CTYPE uint64_t
#include "GB_split_sparse_template.c"
break ;
case GB_16BYTE : // double complex or 16-byte user-defined
#define GB_CTYPE GB_blob16
// #define GB_CTYPE uint64_t
// #undef GB_COPY
// #define GB_COPY(pC,pA) \
// Cx [2*pC ] = Ax [2*pA ] ; \
// Cx [2*pC+1] = Ax [2*pA+1] ;
#include "GB_split_sparse_template.c"
break ;
default:;
}
#endif
}
if (!done)
{
// user-defined types
#define GB_CTYPE GB_void
#undef GB_COPY
#define GB_COPY(pC,pA) \
memcpy (Cx + (pC)*asize, Ax +(pA)*asize, asize) ;
#include "GB_split_sparse_template.c"
}
//------------------------------------------------------------------
// free workspace
//------------------------------------------------------------------
GB_WERK_POP (C_ek_slicing, int64_t) ;
//------------------------------------------------------------------
// advance to the next tile
//------------------------------------------------------------------
if (inner < ninner - 1)
{
int64_t k ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (k = akstart ; k < akend ; k++)
{
int64_t ck = k - akstart ;
int64_t cknz = Cp [ck+1] - Cp [ck] ;
Wp [k] += cknz ;
}
}
//------------------------------------------------------------------
// conform the tile and save it in the Tiles array
//------------------------------------------------------------------
ASSERT_MATRIX_OK (C, "C for GB_split", GB0) ;
GB_OK (GB_hypermatrix_prune (C, Context)) ;
GB_OK (GB_conform (C, Context)) ;
if (csc)
{
GB_TILE (Tiles, inner, outer) = C ;
}
else
{
GB_TILE (Tiles, outer, inner) = C ;
}
ASSERT_MATRIX_OK (C, "final tile C for GB_split", GB0) ;
C = NULL ;
}
}
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
}
|
mxm.c | #include "pmlib_api_C.h"
#include <string.h>
#include <math.h>
#include <stdio.h>
void init2d();
void mxm2d();
#define MATSIZE 1000
struct matrix {
int nsize;
float a2[MATSIZE][MATSIZE];
float b2[MATSIZE][MATSIZE];
float c2[MATSIZE][MATSIZE];
} matrix;
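// the three 1000-by-1000 float arrays (about 4 MB each) live in one static
// struct so they do not overflow the stack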
int main (int argc, char *argv[])
{
// int ninit=3;
// C_pm_initialize(ninit);
// C_pm_start("A:init2d");
init2d();
// C_pm_stop ("A:init2d");
// C_pm_start("B:mxm2d");
mxm2d();
// C_pm_stop ("B:mxm2d");
// C_pm_report (stdout);
printf("something was computed... %f\n",matrix.c2[0][0]);
return 0;
}
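// fill a2 with sin(j/n) and b2 with cos(j/n), and clear the result c2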
void init2d()
{
int i, j, nsize;
matrix.nsize = MATSIZE;
nsize = matrix.nsize;
// #pragma omp parallel for private(i,j)
for (i=0; i<nsize; i++){
for (j=0; j<nsize; j++){
matrix.a2[i][j] = sin((float)j/(float)nsize);
matrix.b2[i][j] = cos((float)j/(float)nsize);
matrix.c2[i][j] = 0.0;
}
}
}
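// naive O(n^3) matrix-matrix multiply with a scalar accumulator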
void mxm2d()
{
int i, j, k, nsize;
float c1;
nsize = matrix.nsize;
// #pragma omp parallel for private(i,j,k,c1)
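// note the transposed indexing below: this computes
// c2[j][i] = sum over k of a2[k][i] * b2[j][k],
// so b2 is traversed along contiguous rows while a2 is strided down columns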
for (i=0; i<nsize; i++){
for (j=0; j<nsize; j++){
c1=0.0;
for (k=0; k<nsize; k++){
c1=c1 + matrix.a2[k][i] * matrix.b2[j][k];
}
matrix.c2[j][i] = c1;
}
}
}
|
kernel_prob_reshaping.c | /* Generated by Cython 0.29.22 */
/* BEGIN: Cython Metadata
{
"distutils": {
"depends": [
"/home/aduran/Atinary/gryffin/venv/lib/python3.8/site-packages/numpy/core/include/numpy/arrayobject.h",
"/home/aduran/Atinary/gryffin/venv/lib/python3.8/site-packages/numpy/core/include/numpy/ufuncobject.h"
],
"extra_compile_args": [
"-fopenmp"
],
"extra_link_args": [
"-fopenmp"
],
"include_dirs": [
"/home/aduran/Atinary/gryffin/venv/lib/python3.8/site-packages/numpy/core/include",
"."
],
"name": "kernel_prob_reshaping",
"sources": [
"kernel_prob_reshaping.pyx"
]
},
"module_name": "kernel_prob_reshaping"
}
END: Cython Metadata */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_22"
#define CYTHON_HEX_VERSION 0x001D16F0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
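/* compile-time check: the division below becomes 1/0, a constant-expression
   error, if SIZEOF_VOID_P disagrees with sizeof(void*) */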
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
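/* emulate the PEP 539 thread-specific storage API (PyThread_tss_*),
   added in CPython 3.7, on top of the legacy PyThread_*_key API */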
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
#endif
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#ifndef PyObject_Unicode
#define PyObject_Unicode PyObject_Str
#endif
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
#define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
#else
#define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_MARK_ERR_POS(f_index, lineno) \
{ __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__kernel_prob_reshaping
#define __PYX_HAVE_API__kernel_prob_reshaping
/* Early includes */
#include <string.h>
#include <stdio.h>
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
/* NumPy API declarations from "numpy/__init__.pxd" */
#include <math.h>
#include "pythread.h"
#include <stdlib.h>
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
return (size_t) i < (size_t) limit;
}
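/* the cast to size_t folds both bounds checks into one comparison:
   a negative i wraps to a value larger than any valid limit */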
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
/* Header.proto */
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"kernel_prob_reshaping.pyx",
"stringsource",
"__init__.pxd",
"type.pxd",
};
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
typedef struct {
struct __pyx_memoryview_obj *memview;
char *data;
Py_ssize_t shape[8];
Py_ssize_t strides[8];
Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m) (m.shape[0])
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
(__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) &&\
!defined(__i386__)
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
#include <Windows.h>
#undef __pyx_atomic_int_type
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#pragma message ("Using MSVC atomics")
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview)\
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview)\
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
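/* ((type) -1) wraps to the type's maximum value exactly when the type is
   unsigned, so this comparison is a portable unsignedness test */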
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":689
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":690
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":691
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":692
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":696
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":697
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":698
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":699
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":703
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":704
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":713
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":714
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":715
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":717
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":718
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":719
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":721
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":722
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":724
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":725
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":726
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
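/* When no native C complex type is available (CYTHON_CCOMPLEX is 0), complex
 * values fall back to plain {real, imag} structs, and arithmetic is supplied
 * by the __Pyx_c_*_float / __Pyx_c_*_double helpers declared further below.
 * Illustrative construction that works under either representation:
 *   __pyx_t_double_complex z = __pyx_t_double_complex_from_parts(1.0, -2.0);
 */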
/*--- Type declarations ---*/
struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper;
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":728
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":729
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":730
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":732
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
/* "kernel_prob_reshaping.pyx":14
* #========================================================================
*
* cdef class KernelReshaper: # <<<<<<<<<<<<<<
*
* cdef int num_samples, num_obs, num_kernels, num_descriptors
*/
struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper {
PyObject_HEAD
struct __pyx_vtabstruct_21kernel_prob_reshaping_KernelReshaper *__pyx_vtab;
int num_samples;
int num_obs;
int num_kernels;
int num_descriptors;
PyArrayObject *np_recomputed_probs;
PyArrayObject *np_all_distances;
};
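/* C layout of the KernelReshaper extension type: one struct field per `cdef`
 * attribute declared in kernel_prob_reshaping.pyx (four int counters plus two
 * cached NumPy arrays), preceded by the standard PyObject_HEAD and a pointer
 * to the vtable used for C-level method dispatch (defined below). */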
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_array_obj {
PyObject_HEAD
struct __pyx_vtabstruct_array *__pyx_vtab;
char *data;
Py_ssize_t len;
char *format;
int ndim;
Py_ssize_t *_shape;
Py_ssize_t *_strides;
Py_ssize_t itemsize;
PyObject *mode;
PyObject *_format;
void (*callback_free_data)(void *);
int free_data;
int dtype_is_object;
};
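/* Backing struct for cython.view.array: a raw data pointer with explicit
 * shape/strides/itemsize metadata, the element format string, the memory
 * layout mode ('c' or 'fortran'), and an optional callback for freeing
 * externally owned data. */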
/* "View.MemoryView":279
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
struct __pyx_MemviewEnum_obj {
PyObject_HEAD
PyObject *name;
};
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_memoryview_obj {
PyObject_HEAD
struct __pyx_vtabstruct_memoryview *__pyx_vtab;
PyObject *obj;
PyObject *_size;
PyObject *_array_interface;
PyThread_type_lock lock;
__pyx_atomic_int acquisition_count[2];
__pyx_atomic_int *acquisition_count_aligned_p;
Py_buffer view;
int flags;
int dtype_is_object;
__Pyx_TypeInfo *typeinfo;
};
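/* Backing struct for the Cython memoryview type. Besides the exporting
 * object and its Py_buffer view, it carries a thread lock and an atomic
 * acquisition count so that memoryview slices can be acquired and released
 * from nogil code without touching Python reference counts. */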
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_memoryviewslice_obj {
struct __pyx_memoryview_obj __pyx_base;
__Pyx_memviewslice from_slice;
PyObject *from_object;
PyObject *(*to_object_func)(char *);
int (*to_dtype_func)(char *, PyObject *);
};
/* "kernel_prob_reshaping.pyx":14
* #========================================================================
*
* cdef class KernelReshaper: # <<<<<<<<<<<<<<
*
* cdef int num_samples, num_obs, num_kernels, num_descriptors
*/
struct __pyx_vtabstruct_21kernel_prob_reshaping_KernelReshaper {
__Pyx_memviewslice (*_reshape_probs)(struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *, __Pyx_memviewslice, __Pyx_memviewslice);
PyObject *(*reshape_probs)(struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *, PyArrayObject *, PyArrayObject *, int __pyx_skip_dispatch);
};
static struct __pyx_vtabstruct_21kernel_prob_reshaping_KernelReshaper *__pyx_vtabptr_21kernel_prob_reshaping_KernelReshaper;
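/* Vtable for KernelReshaper: _reshape_probs is the C-level implementation
 * operating on typed memoryview slices; reshape_probs is the Python-visible
 * wrapper taking NumPy arrays (__pyx_skip_dispatch lets already-dispatched
 * calls bypass a redundant Python-level method lookup). Generated code calls
 * cdef methods through this table, roughly:
 *   result = ((struct __pyx_vtabstruct_21kernel_prob_reshaping_KernelReshaper *)
 *             __pyx_v_self->__pyx_vtab)->_reshape_probs(__pyx_v_self,
 *                                                       cat_probs, descriptors);
 */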
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_vtabstruct_array {
PyObject *(*get_memview)(struct __pyx_array_obj *);
};
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_vtabstruct_memoryview {
char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_vtabstruct__memoryviewslice {
struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
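/* RefNanny is Cython's reference-count debugging aid: when compiled with
 * CYTHON_REFNANNY, every INCREF/DECREF/GOTREF/GIVEREF is reported to an
 * external checker inside a per-function context. In normal builds the
 * macros collapse to plain Py_INCREF/Py_DECREF, and the GOTREF/GIVEREF
 * bookkeeping, which has no runtime counterpart, compiles to nothing. */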
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* KeywordStringCheck.proto */
static int __Pyx_CheckKeywordStrings(PyObject *kwdict, const char* function_name, int kw_allowed);
/* MemviewSliceInit.proto */
#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
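/* Dict-version caching: CPython 3.6+ bumps ma_version_tag on every dict
 * mutation, so a lookup result can be cached next to the version it was
 * obtained under and reused until the dict actually changes; the LOOKUP
 * expression is only re-evaluated on a version mismatch. */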
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
(sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
#define __Pxy_PyFrame_Initialize_Offsets()\
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame)\
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
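/* Module-global lookup built on the same versioning trick: the common case
 * is a version comparison against the cached module-dict tag; on a miss the
 * name is resolved in the module dict and, failing that, in builtins via
 * __Pyx_GetBuiltinName before the cache is refreshed. */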
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* GetAttr3.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* HasAttr.proto */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* IncludeStringH.proto */
#include <string.h>
/* BytesEquals.proto */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
/* UnicodeEquals.proto */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
/* StrEquals.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
/* None.proto */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
/* UnaryNegOverflows.proto */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
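/* For two's-complement longs, -LONG_MIN is not representable; this check
 * exploits the fact that LONG_MIN is the only negative value equal to its
 * own unsigned negation (x == 0 also satisfies the equality but is excluded
 * by the (x) < 0 test). */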
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
/* decode_c_string_utf16.proto */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 0;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = -1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
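/* The byteorder argument of PyUnicode_DecodeUTF16 selects the decoding mode:
 * 0 means native order with BOM detection, -1 forces little-endian, and
 * 1 forces big-endian -- which is exactly how the three wrappers differ. */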
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* ListCompAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len)) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
__Pyx_SET_SIZE(list, len + 1);
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
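/* Fast-path append for list comprehensions: while the list still has spare
 * capacity (allocated > len), the item is stored directly with
 * PyList_SET_ITEM and the size bumped, skipping PyList_Append's call and
 * error-checking overhead; otherwise it falls back to PyList_Append. */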
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* ListExtend.proto */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
PyObject* none = _PyList_Extend((PyListObject*)L, v);
if (unlikely(!none))
return -1;
Py_DECREF(none);
return 0;
#else
return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
/* ListAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
__Pyx_SET_SIZE(list, len + 1);
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
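/* Same fast path as above for ordinary .append(); the additional
 * len > allocated/2 test appears to keep the shortcut off mostly empty
 * lists so that CPython's usual resize heuristics still get a chance to
 * run. */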
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* None.proto */
static CYTHON_INLINE long __Pyx_div_long(long, long);
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* SetVTable.proto */
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
/* PyObjectGetAttrStrNoError.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* TypeImport.proto */
#ifndef __PYX_HAVE_RT_ImportType_proto
#define __PYX_HAVE_RT_ImportType_proto
enum __Pyx_ImportType_CheckSize {
__Pyx_ImportType_CheckSize_Error = 0,
__Pyx_ImportType_CheckSize_Warn = 1,
__Pyx_ImportType_CheckSize_Ignore = 2
};
static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size);
#endif
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
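/* Per-argument buffer bookkeeping for np.ndarray parameters: each
 * dimension's shape/strides/suboffsets are cached in a fixed array of 8
 * entries, matching Cython's default maximum buffer dimensionality. */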
/* MemviewSliceIsContig.proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
/* OverlappingSlices.proto */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
/* Capsule.proto */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
/* GCCDiagnostics.proto */
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
#define __Pyx_HAS_GCC_DIAGNOSTIC
#endif
/* MemviewDtypeToObject.proto */
static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp);
static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj);
/* RealImag.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if defined(__cplusplus) && CYTHON_CCOMPLEX\
&& (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_float(a, b) ((a)==(b))
#define __Pyx_c_sum_float(a, b) ((a)+(b))
#define __Pyx_c_diff_float(a, b) ((a)-(b))
#define __Pyx_c_prod_float(a, b) ((a)*(b))
#define __Pyx_c_quot_float(a, b) ((a)/(b))
#define __Pyx_c_neg_float(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_float(z) ((z)==(float)0)
#define __Pyx_c_conj_float(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_float(z) (::std::abs(z))
#define __Pyx_c_pow_float(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_float(z) ((z)==0)
#define __Pyx_c_conj_float(z) (conjf(z))
#if 1
#define __Pyx_c_abs_float(z) (cabsf(z))
#define __Pyx_c_pow_float(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_double(a, b) ((a)==(b))
#define __Pyx_c_sum_double(a, b) ((a)+(b))
#define __Pyx_c_diff_double(a, b) ((a)-(b))
#define __Pyx_c_prod_double(a, b) ((a)*(b))
#define __Pyx_c_quot_double(a, b) ((a)/(b))
#define __Pyx_c_neg_double(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_double(z) ((z)==(double)0)
#define __Pyx_c_conj_double(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_double(z) (::std::abs(z))
#define __Pyx_c_pow_double(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_double(z) ((z)==0)
#define __Pyx_c_conj_double(z) (conj(z))
#if 1
#define __Pyx_c_abs_double(z) (cabs(z))
#define __Pyx_c_pow_double(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
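/* With no native complex type, an expression like z = a*b + c is spelled
 * with the helpers declared above; illustrative:
 *   __pyx_t_double_complex z =
 *       __Pyx_c_sum_double(__Pyx_c_prod_double(a, b), c);
 * Under CYTHON_CCOMPLEX the same names are macros over native operators, so
 * the call sites are identical either way. */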
/* MemviewSliceCopyTemplate.proto */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* TypeInfoCompare.proto */
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
/* MemviewSliceValidateAndInit.proto */
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *, int writable_flag);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
static __Pyx_memviewslice __pyx_f_21kernel_prob_reshaping_14KernelReshaper__reshape_probs(struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *__pyx_v_self, __Pyx_memviewslice __pyx_v_cat_probs, __Pyx_memviewslice __pyx_v_descriptors); /* proto*/
static PyObject *__pyx_f_21kernel_prob_reshaping_14KernelReshaper_reshape_probs(struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *__pyx_v_self, PyArrayObject *__pyx_v_cat_probs, PyArrayObject *__pyx_v_descriptors, int __pyx_skip_dispatch); /* proto*/
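/* These two prototypes are the heart of the module: the first is the
 * C-level kernel operating on typed double memoryview slices (the dsdsds
 * and dsds converters declared earlier suggest one 3-D and one 2-D input),
 * the second the Python-callable wrapper that validates the incoming NumPy
 * arrays, builds the slices, and returns the reshaped probabilities. */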
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'cython.view' */
/* Module declarations from 'cython' */
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'cpython' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'cpython.mem' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
/* Module declarations from 'libc.math' */
/* Module declarations from 'kernel_prob_reshaping' */
static PyTypeObject *__pyx_ptype_21kernel_prob_reshaping_KernelReshaper = 0;
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static int __pyx_memoryview_thread_locks_used;
static PyThread_type_lock __pyx_memoryview_thread_locks[8];
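/* Memoryview objects draw their locks from this small static pool rather
 * than allocating a fresh PyThread_type_lock per view;
 * __pyx_memoryview_thread_locks_used tracks how many pool slots are
 * currently handed out, and locks are returned to the pool on dealloc. */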
static PyObject *__pyx_f_21kernel_prob_reshaping___pyx_unpickle_KernelReshaper__set_state(struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *, PyObject *); /*proto*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
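/* Buffer dtype descriptor used to validate double memoryviews against the
 * format string exported by NumPy: name "double", no nested fields,
 * sizeof(double); the 'R' typegroup appears to mark a real floating-point
 * type in Cython's buffer-format checker. */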
#define __Pyx_MODULE_NAME "kernel_prob_reshaping"
extern int __pyx_module_is_main_kernel_prob_reshaping;
int __pyx_module_is_main_kernel_prob_reshaping = 0;
/* Implementation of 'kernel_prob_reshaping' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ImportError;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
static const char __pyx_k_O[] = "O";
static const char __pyx_k_c[] = "c";
static const char __pyx_k_id[] = "id";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_new[] = "__new__";
static const char __pyx_k_obj[] = "obj";
static const char __pyx_k_base[] = "base";
static const char __pyx_k_dict[] = "__dict__";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mode[] = "mode";
static const char __pyx_k_name[] = "name";
static const char __pyx_k_ndim[] = "ndim";
static const char __pyx_k_pack[] = "pack";
static const char __pyx_k_size[] = "size";
static const char __pyx_k_step[] = "step";
static const char __pyx_k_stop[] = "stop";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_ASCII[] = "ASCII";
static const char __pyx_k_array[] = "array";
static const char __pyx_k_class[] = "__class__";
static const char __pyx_k_error[] = "error";
static const char __pyx_k_flags[] = "flags";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_start[] = "start";
static const char __pyx_k_zeros[] = "zeros";
static const char __pyx_k_encode[] = "encode";
static const char __pyx_k_format[] = "format";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_name_2[] = "__name__";
static const char __pyx_k_pickle[] = "pickle";
static const char __pyx_k_reduce[] = "__reduce__";
static const char __pyx_k_struct[] = "struct";
static const char __pyx_k_unpack[] = "unpack";
static const char __pyx_k_update[] = "update";
static const char __pyx_k_fortran[] = "fortran";
static const char __pyx_k_memview[] = "memview";
static const char __pyx_k_Ellipsis[] = "Ellipsis";
static const char __pyx_k_getstate[] = "__getstate__";
static const char __pyx_k_itemsize[] = "itemsize";
static const char __pyx_k_pyx_type[] = "__pyx_type";
static const char __pyx_k_setstate[] = "__setstate__";
static const char __pyx_k_TypeError[] = "TypeError";
static const char __pyx_k_cat_probs[] = "cat_probs";
static const char __pyx_k_enumerate[] = "enumerate";
static const char __pyx_k_pyx_state[] = "__pyx_state";
static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
static const char __pyx_k_IndexError[] = "IndexError";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_pyx_result[] = "__pyx_result";
static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static const char __pyx_k_ImportError[] = "ImportError";
static const char __pyx_k_MemoryError[] = "MemoryError";
static const char __pyx_k_PickleError[] = "PickleError";
static const char __pyx_k_descriptors[] = "descriptors";
static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
static const char __pyx_k_stringsource[] = "stringsource";
static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
static const char __pyx_k_reshape_probs[] = "reshape_probs";
static const char __pyx_k_KernelReshaper[] = "KernelReshaper";
static const char __pyx_k_View_MemoryView[] = "View.MemoryView";
static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static const char __pyx_k_kernel_prob_reshaping[] = "kernel_prob_reshaping";
static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static const char __pyx_k_pyx_unpickle_KernelReshaper[] = "__pyx_unpickle_KernelReshaper";
static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import";
static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview";
static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview";
static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static const char __pyx_k_Incompatible_checksums_s_vs_0x9c[] = "Incompatible checksums (%s vs 0x9c5b774 = (np_all_distances, np_recomputed_probs, num_descriptors, num_kernels, num_obs, num_samples))";
static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))";
static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import";
static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
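/* Raw byte literals backing the module's interned string constants, declared
 * as PyObject pointers just below. The prefixes appear to encode the kind of
 * object created from each (__pyx_n_s_* interned identifier strings,
 * __pyx_kp_s_* plain string constants, __pyx_n_b_*/__pyx_n_u_* bytes and
 * unicode); all are materialized once at import time by __Pyx_InitStrings(). */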
static PyObject *__pyx_n_s_ASCII;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor;
static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_n_s_ImportError;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0x9c;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_KernelReshaper;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_PickleError;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_View_MemoryView;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_array;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_cat_probs;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_descriptors;
static PyObject *__pyx_n_s_dict;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_n_u_fortran;
static PyObject *__pyx_n_s_getstate;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_kernel_prob_reshaping;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_new;
static PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to;
static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_pickle;
static PyObject *__pyx_n_s_pyx_PickleError;
static PyObject *__pyx_n_s_pyx_checksum;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_result;
static PyObject *__pyx_n_s_pyx_state;
static PyObject *__pyx_n_s_pyx_type;
static PyObject *__pyx_n_s_pyx_unpickle_Enum;
static PyObject *__pyx_n_s_pyx_unpickle_KernelReshaper;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_reduce;
static PyObject *__pyx_n_s_reduce_cython;
static PyObject *__pyx_n_s_reduce_ex;
static PyObject *__pyx_n_s_reshape_probs;
static PyObject *__pyx_n_s_setstate;
static PyObject *__pyx_n_s_setstate_cython;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_kp_s_stringsource;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_update;
static PyObject *__pyx_n_s_zeros;
static int __pyx_pf_21kernel_prob_reshaping_14KernelReshaper___init__(CYTHON_UNUSED struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_21kernel_prob_reshaping_14KernelReshaper_2reshape_probs(struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *__pyx_v_self, PyArrayObject *__pyx_v_cat_probs, PyArrayObject *__pyx_v_descriptors); /* proto */
static PyObject *__pyx_pf_21kernel_prob_reshaping_14KernelReshaper_4__reduce_cython__(struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_21kernel_prob_reshaping_14KernelReshaper_6__setstate_cython__(struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_21kernel_prob_reshaping___pyx_unpickle_KernelReshaper(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_21kernel_prob_reshaping_KernelReshaper(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_163952500;
static PyObject *__pyx_int_184977713;
static PyObject *__pyx_int_neg_1;
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__17;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__15;
static PyObject *__pyx_tuple__16;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__20;
static PyObject *__pyx_tuple__21;
static PyObject *__pyx_tuple__23;
static PyObject *__pyx_tuple__24;
static PyObject *__pyx_tuple__25;
static PyObject *__pyx_tuple__26;
static PyObject *__pyx_tuple__27;
static PyObject *__pyx_tuple__28;
static PyObject *__pyx_codeobj__22;
static PyObject *__pyx_codeobj__29;
/* Late includes */
/* "kernel_prob_reshaping.pyx":20
* cdef np.ndarray np_all_distances
*
* def __init__(self): # <<<<<<<<<<<<<<
*
* pass
*/
/* Python wrapper */
static int __pyx_pw_21kernel_prob_reshaping_14KernelReshaper_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_21kernel_prob_reshaping_14KernelReshaper_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
if (unlikely(PyTuple_GET_SIZE(__pyx_args) > 0)) {
__Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, PyTuple_GET_SIZE(__pyx_args)); return -1;}
if (unlikely(__pyx_kwds) && unlikely(PyDict_Size(__pyx_kwds) > 0) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__init__", 0))) return -1;
__pyx_r = __pyx_pf_21kernel_prob_reshaping_14KernelReshaper___init__(((struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_21kernel_prob_reshaping_14KernelReshaper___init__(CYTHON_UNUSED struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__", 0);
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "kernel_prob_reshaping.pyx":27
* @cython.cdivision(True)
* @cython.boundscheck(False)
* cdef double [:, :, :] _reshape_probs(self, double [:, :, :] cat_probs, double [:, :] descriptors): # <<<<<<<<<<<<<<
*
* cdef double [:, :, :] recomputed_probs = self.np_recomputed_probs
*/
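/* Summary of what this generated function computes (derived from the .pyx
 * source quoted above; the notation below is illustrative only and is not
 * part of the module). For each sample s, observation o, and target
 * kernel t:
 *
 *   avg[d]        = sum_k cat_probs[s, o, k] * descriptors[k, d]
 *   dyi           = num_kernels * (descriptors[t, d] - avg[d])
 *   dist[s, o, t] = sqrt( (1 / num_descriptors) * sum_d dyi^2 )
 *   recomputed_probs[s, o, k] = exp(-dist[s, o, k]) / sum_j exp(-dist[s, o, j])
 *
 * i.e. a softmax over negative descriptor-space distances, evaluated
 * independently for every (s, o) pair. */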
static __Pyx_memviewslice __pyx_f_21kernel_prob_reshaping_14KernelReshaper__reshape_probs(struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *__pyx_v_self, __Pyx_memviewslice __pyx_v_cat_probs, __Pyx_memviewslice __pyx_v_descriptors) {
__Pyx_memviewslice __pyx_v_recomputed_probs = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_all_distances = { 0, 0, { 0 }, { 0 }, { 0 } };
double __pyx_v_ds2;
double __pyx_v_dyi;
double __pyx_v_sum_distances;
double __pyx_v_averaged_descriptor;
int __pyx_v_sample_index;
int __pyx_v_obs_index;
int __pyx_v_target_cat_index;
int __pyx_v_desc_index;
int __pyx_v_kernel_index;
__Pyx_memviewslice __pyx_r = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
int __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
int __pyx_t_15;
int __pyx_t_16;
Py_ssize_t __pyx_t_17;
Py_ssize_t __pyx_t_18;
Py_ssize_t __pyx_t_19;
Py_ssize_t __pyx_t_20;
Py_ssize_t __pyx_t_21;
Py_ssize_t __pyx_t_22;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_reshape_probs", 0);
/* "kernel_prob_reshaping.pyx":29
* cdef double [:, :, :] _reshape_probs(self, double [:, :, :] cat_probs, double [:, :] descriptors):
*
* cdef double [:, :, :] recomputed_probs = self.np_recomputed_probs # <<<<<<<<<<<<<<
* cdef double [:, :, :] all_distances = self.np_all_distances
*
*/
__pyx_t_1 = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(((PyObject *)__pyx_v_self->np_recomputed_probs), PyBUF_WRITABLE); if (unlikely(!__pyx_t_1.memview)) __PYX_ERR(0, 29, __pyx_L1_error)
__pyx_v_recomputed_probs = __pyx_t_1;
__pyx_t_1.memview = NULL;
__pyx_t_1.data = NULL;
/* "kernel_prob_reshaping.pyx":30
*
* cdef double [:, :, :] recomputed_probs = self.np_recomputed_probs
* cdef double [:, :, :] all_distances = self.np_all_distances # <<<<<<<<<<<<<<
*
* cdef double ds2, dyi, sum_distances
*/
__pyx_t_1 = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(((PyObject *)__pyx_v_self->np_all_distances), PyBUF_WRITABLE); if (unlikely(!__pyx_t_1.memview)) __PYX_ERR(0, 30, __pyx_L1_error)
__pyx_v_all_distances = __pyx_t_1;
__pyx_t_1.memview = NULL;
__pyx_t_1.data = NULL;
/* "kernel_prob_reshaping.pyx":37
* cdef int sample_index, obs_index, target_cat_index, desc_index, kernel_index
*
* for sample_index in prange(self.num_samples, nogil = True): # <<<<<<<<<<<<<<
*
* for obs_index in range(self.num_obs):
*/
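/* The nogil prange above is realized as the OpenMP parallel-for region
 * below: the GIL is released (Py_UNBLOCK_THREADS), the loop counters and
 * per-iteration temporaries are listed as private/lastprivate in the
 * omp pragmas, and each private variable is reset to a sentinel value
 * (NaN for doubles, 0xbad0bad0 for ints) at the top of every iteration. */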
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_2 = __pyx_v_self->num_samples;
if ((1 == 0)) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_4 = (__pyx_t_2 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_4 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_8, __pyx_t_9)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_averaged_descriptor) lastprivate(__pyx_v_desc_index) lastprivate(__pyx_v_ds2) lastprivate(__pyx_v_dyi) lastprivate(__pyx_v_kernel_index) lastprivate(__pyx_v_obs_index) firstprivate(__pyx_v_sample_index) lastprivate(__pyx_v_sample_index) lastprivate(__pyx_v_sum_distances) lastprivate(__pyx_v_target_cat_index)
#endif /* _OPENMP */
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){
{
__pyx_v_sample_index = (int)(0 + 1 * __pyx_t_3);
/* Initialize private variables to invalid values */
__pyx_v_averaged_descriptor = ((double)__PYX_NAN());
__pyx_v_desc_index = ((int)0xbad0bad0);
__pyx_v_ds2 = ((double)__PYX_NAN());
__pyx_v_dyi = ((double)__PYX_NAN());
__pyx_v_kernel_index = ((int)0xbad0bad0);
__pyx_v_obs_index = ((int)0xbad0bad0);
__pyx_v_sum_distances = ((double)__PYX_NAN());
__pyx_v_target_cat_index = ((int)0xbad0bad0);
/* "kernel_prob_reshaping.pyx":39
* for sample_index in prange(self.num_samples, nogil = True):
*
* for obs_index in range(self.num_obs): # <<<<<<<<<<<<<<
*
* for target_cat_index in range(self.num_kernels):
*/
__pyx_t_5 = __pyx_v_self->num_obs;
__pyx_t_6 = __pyx_t_5;
for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) {
__pyx_v_obs_index = __pyx_t_7;
/* "kernel_prob_reshaping.pyx":41
* for obs_index in range(self.num_obs):
*
* for target_cat_index in range(self.num_kernels): # <<<<<<<<<<<<<<
*
* ds2 = 0.
*/
__pyx_t_8 = __pyx_v_self->num_kernels;
__pyx_t_9 = __pyx_t_8;
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
__pyx_v_target_cat_index = __pyx_t_10;
/* "kernel_prob_reshaping.pyx":43
* for target_cat_index in range(self.num_kernels):
*
* ds2 = 0. # <<<<<<<<<<<<<<
*
* for desc_index in range(self.num_descriptors):
*/
__pyx_v_ds2 = 0.;
/* "kernel_prob_reshaping.pyx":45
* ds2 = 0.
*
* for desc_index in range(self.num_descriptors): # <<<<<<<<<<<<<<
*
* averaged_descriptor = 0.
*/
__pyx_t_11 = __pyx_v_self->num_descriptors;
__pyx_t_12 = __pyx_t_11;
for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) {
__pyx_v_desc_index = __pyx_t_13;
/* "kernel_prob_reshaping.pyx":47
* for desc_index in range(self.num_descriptors):
*
* averaged_descriptor = 0. # <<<<<<<<<<<<<<
* for kernel_index in range(self.num_kernels):
* averaged_descriptor = cat_probs[sample_index, obs_index, kernel_index] * descriptors[kernel_index, desc_index] + averaged_descriptor
*/
__pyx_v_averaged_descriptor = 0.;
/* "kernel_prob_reshaping.pyx":48
*
* averaged_descriptor = 0.
* for kernel_index in range(self.num_kernels): # <<<<<<<<<<<<<<
* averaged_descriptor = cat_probs[sample_index, obs_index, kernel_index] * descriptors[kernel_index, desc_index] + averaged_descriptor
*
*/
__pyx_t_14 = __pyx_v_self->num_kernels;
__pyx_t_15 = __pyx_t_14;
for (__pyx_t_16 = 0; __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) {
__pyx_v_kernel_index = __pyx_t_16;
/* "kernel_prob_reshaping.pyx":49
* averaged_descriptor = 0.
* for kernel_index in range(self.num_kernels):
* averaged_descriptor = cat_probs[sample_index, obs_index, kernel_index] * descriptors[kernel_index, desc_index] + averaged_descriptor # <<<<<<<<<<<<<<
*
* dyi = self.num_kernels * (descriptors[target_cat_index, desc_index] - averaged_descriptor)
*/
__pyx_t_17 = __pyx_v_sample_index;
__pyx_t_18 = __pyx_v_obs_index;
__pyx_t_19 = __pyx_v_kernel_index;
if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_cat_probs.shape[0];
if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_v_cat_probs.shape[1];
if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_v_cat_probs.shape[2];
__pyx_t_20 = __pyx_v_kernel_index;
__pyx_t_21 = __pyx_v_desc_index;
if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_v_descriptors.shape[0];
if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_v_descriptors.shape[1];
__pyx_v_averaged_descriptor = (((*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_cat_probs.data + __pyx_t_17 * __pyx_v_cat_probs.strides[0]) ) + __pyx_t_18 * __pyx_v_cat_probs.strides[1]) ) + __pyx_t_19 * __pyx_v_cat_probs.strides[2]) ))) * (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_descriptors.data + __pyx_t_20 * __pyx_v_descriptors.strides[0]) ) + __pyx_t_21 * __pyx_v_descriptors.strides[1]) )))) + __pyx_v_averaged_descriptor);
}
/* "kernel_prob_reshaping.pyx":51
* averaged_descriptor = cat_probs[sample_index, obs_index, kernel_index] * descriptors[kernel_index, desc_index] + averaged_descriptor
*
* dyi = self.num_kernels * (descriptors[target_cat_index, desc_index] - averaged_descriptor) # <<<<<<<<<<<<<<
* ds2 = ds2 + dyi*dyi
*
*/
__pyx_t_21 = __pyx_v_target_cat_index;
__pyx_t_20 = __pyx_v_desc_index;
if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_v_descriptors.shape[0];
if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_v_descriptors.shape[1];
__pyx_v_dyi = (__pyx_v_self->num_kernels * ((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_descriptors.data + __pyx_t_21 * __pyx_v_descriptors.strides[0]) ) + __pyx_t_20 * __pyx_v_descriptors.strides[1]) ))) - __pyx_v_averaged_descriptor));
/* "kernel_prob_reshaping.pyx":52
*
* dyi = self.num_kernels * (descriptors[target_cat_index, desc_index] - averaged_descriptor)
* ds2 = ds2 + dyi*dyi # <<<<<<<<<<<<<<
*
* all_distances[sample_index, obs_index, target_cat_index] = sqrt(ds2 / self.num_descriptors)
*/
__pyx_v_ds2 = (__pyx_v_ds2 + (__pyx_v_dyi * __pyx_v_dyi));
}
/* "kernel_prob_reshaping.pyx":54
* ds2 = ds2 + dyi*dyi
*
* all_distances[sample_index, obs_index, target_cat_index] = sqrt(ds2 / self.num_descriptors) # <<<<<<<<<<<<<<
*
* # got all distances, compute probs from distances
*/
__pyx_t_20 = __pyx_v_sample_index;
__pyx_t_21 = __pyx_v_obs_index;
__pyx_t_19 = __pyx_v_target_cat_index;
if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_v_all_distances.shape[0];
if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_v_all_distances.shape[1];
if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_v_all_distances.shape[2];
*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_all_distances.data + __pyx_t_20 * __pyx_v_all_distances.strides[0]) ) + __pyx_t_21 * __pyx_v_all_distances.strides[1]) ) + __pyx_t_19 * __pyx_v_all_distances.strides[2]) )) = sqrt((__pyx_v_ds2 / __pyx_v_self->num_descriptors));
}
/* "kernel_prob_reshaping.pyx":57
*
* # got all distances, compute probs from distances
* sum_distances = 0. # <<<<<<<<<<<<<<
* for kernel_index in range(self.num_kernels):
* sum_distances = sum_distances + exp( - all_distances[sample_index, obs_index, kernel_index])
*/
__pyx_v_sum_distances = 0.;
/* "kernel_prob_reshaping.pyx":58
* # got all distances, compute probs from distances
* sum_distances = 0.
* for kernel_index in range(self.num_kernels): # <<<<<<<<<<<<<<
* sum_distances = sum_distances + exp( - all_distances[sample_index, obs_index, kernel_index])
*
*/
__pyx_t_8 = __pyx_v_self->num_kernels;
__pyx_t_9 = __pyx_t_8;
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
__pyx_v_kernel_index = __pyx_t_10;
/* "kernel_prob_reshaping.pyx":59
* sum_distances = 0.
* for kernel_index in range(self.num_kernels):
* sum_distances = sum_distances + exp( - all_distances[sample_index, obs_index, kernel_index]) # <<<<<<<<<<<<<<
*
* for kernel_index in range(self.num_kernels):
*/
__pyx_t_19 = __pyx_v_sample_index;
__pyx_t_21 = __pyx_v_obs_index;
__pyx_t_20 = __pyx_v_kernel_index;
if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_v_all_distances.shape[0];
if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_v_all_distances.shape[1];
if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_v_all_distances.shape[2];
__pyx_v_sum_distances = (__pyx_v_sum_distances + exp((-(*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_all_distances.data + __pyx_t_19 * __pyx_v_all_distances.strides[0]) ) + __pyx_t_21 * __pyx_v_all_distances.strides[1]) ) + __pyx_t_20 * __pyx_v_all_distances.strides[2]) ))))));
}
/* "kernel_prob_reshaping.pyx":61
* sum_distances = sum_distances + exp( - all_distances[sample_index, obs_index, kernel_index])
*
* for kernel_index in range(self.num_kernels): # <<<<<<<<<<<<<<
* recomputed_probs[sample_index, obs_index, kernel_index] = exp( - all_distances[sample_index, obs_index, kernel_index]) / sum_distances
*
*/
__pyx_t_8 = __pyx_v_self->num_kernels;
__pyx_t_9 = __pyx_t_8;
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
__pyx_v_kernel_index = __pyx_t_10;
/* "kernel_prob_reshaping.pyx":62
*
* for kernel_index in range(self.num_kernels):
* recomputed_probs[sample_index, obs_index, kernel_index] = exp( - all_distances[sample_index, obs_index, kernel_index]) / sum_distances # <<<<<<<<<<<<<<
*
* return recomputed_probs
*/
__pyx_t_20 = __pyx_v_sample_index;
__pyx_t_21 = __pyx_v_obs_index;
__pyx_t_19 = __pyx_v_kernel_index;
if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_v_all_distances.shape[0];
if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_v_all_distances.shape[1];
if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_v_all_distances.shape[2];
__pyx_t_18 = __pyx_v_sample_index;
__pyx_t_17 = __pyx_v_obs_index;
__pyx_t_22 = __pyx_v_kernel_index;
if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_v_recomputed_probs.shape[0];
if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_recomputed_probs.shape[1];
if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_v_recomputed_probs.shape[2];
*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_recomputed_probs.data + __pyx_t_18 * __pyx_v_recomputed_probs.strides[0]) ) + __pyx_t_17 * __pyx_v_recomputed_probs.strides[1]) ) + __pyx_t_22 * __pyx_v_recomputed_probs.strides[2]) )) = (exp((-(*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_all_distances.data + __pyx_t_20 * __pyx_v_all_distances.strides[0]) ) + __pyx_t_21 * __pyx_v_all_distances.strides[1]) ) + __pyx_t_19 * __pyx_v_all_distances.strides[2]) ))))) / __pyx_v_sum_distances);
}
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "kernel_prob_reshaping.pyx":37
* cdef int sample_index, obs_index, target_cat_index, desc_index, kernel_index
*
* for sample_index in prange(self.num_samples, nogil = True): # <<<<<<<<<<<<<<
*
* for obs_index in range(self.num_obs):
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "kernel_prob_reshaping.pyx":64
* recomputed_probs[sample_index, obs_index, kernel_index] = exp( - all_distances[sample_index, obs_index, kernel_index]) / sum_distances
*
* return recomputed_probs # <<<<<<<<<<<<<<
*
*
*/
__PYX_INC_MEMVIEW(&__pyx_v_recomputed_probs, 0);
__pyx_r = __pyx_v_recomputed_probs;
goto __pyx_L0;
/* "kernel_prob_reshaping.pyx":27
* @cython.cdivision(True)
* @cython.boundscheck(False)
* cdef double [:, :, :] _reshape_probs(self, double [:, :, :] cat_probs, double [:, :] descriptors): # <<<<<<<<<<<<<<
*
* cdef double [:, :, :] recomputed_probs = self.np_recomputed_probs
*/
/* function exit code */
__pyx_L1_error:;
__PYX_XDEC_MEMVIEW(&__pyx_t_1, 1);
__pyx_r.data = NULL;
__pyx_r.memview = NULL;
__Pyx_AddTraceback("kernel_prob_reshaping.KernelReshaper._reshape_probs", __pyx_clineno, __pyx_lineno, __pyx_filename);
goto __pyx_L2;
__pyx_L0:;
if (unlikely(!__pyx_r.memview)) {
PyErr_SetString(PyExc_TypeError, "Memoryview return value is not initialized");
}
__pyx_L2:;
__PYX_XDEC_MEMVIEW(&__pyx_v_recomputed_probs, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_all_distances, 1);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
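/* For reference, a vectorized NumPy sketch believed to be equivalent to the
 * loop nest above (illustrative only, not part of this module; `np` is
 * numpy and `nk` abbreviates num_kernels):
 *
 *   # cat_probs:   (num_samples, num_obs, num_kernels)
 *   # descriptors: (num_kernels, num_descriptors)
 *   avg  = cat_probs @ descriptors                          # (S, O, D)
 *   diff = descriptors[None, None, :, :] - avg[:, :, None, :]  # (S, O, K, D)
 *   dist = np.sqrt(np.mean((nk * diff) ** 2, axis=-1))      # (S, O, K)
 *   w    = np.exp(-dist)
 *   probs = w / w.sum(axis=-1, keepdims=True)               # rows sum to 1
 */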
/* "kernel_prob_reshaping.pyx":68
*
*
* cpdef reshape_probs(self, np.ndarray cat_probs, np.ndarray descriptors): # <<<<<<<<<<<<<<
*
* self.num_samples = cat_probs.shape[0]
*/
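/* Typical Python-level usage of the cpdef method above (a hedged sketch;
 * shapes are inferred from the shape[] reads below, and both inputs must
 * be writable float64 ndarrays because the memoryview conversions request
 * PyBUF_WRITABLE):
 *
 *   reshaper = KernelReshaper()
 *   # cat_probs:   float64, shape (num_samples, num_obs, num_kernels)
 *   # descriptors: float64, shape (num_kernels, num_descriptors)
 *   new_probs = reshaper.reshape_probs(cat_probs, descriptors)
 *   # new_probs: shape (num_samples, num_obs, num_kernels)
 */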
static PyObject *__pyx_pw_21kernel_prob_reshaping_14KernelReshaper_3reshape_probs(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_f_21kernel_prob_reshaping_14KernelReshaper_reshape_probs(struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *__pyx_v_self, PyArrayObject *__pyx_v_cat_probs, PyArrayObject *__pyx_v_descriptors, int __pyx_skip_dispatch) {
__Pyx_memviewslice __pyx_v_cat_probs_memview = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_descriptors_memview = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_v_reshaped_probs = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
__Pyx_memviewslice __pyx_t_8 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_t_9 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("reshape_probs", 0);
/* Check if called by wrapper */
if (unlikely(__pyx_skip_dispatch)) ;
/* Check if overridden in Python */
else if (unlikely((Py_TYPE(((PyObject *)__pyx_v_self))->tp_dictoffset != 0) || (Py_TYPE(((PyObject *)__pyx_v_self))->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)))) {
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS
static PY_UINT64_T __pyx_tp_dict_version = __PYX_DICT_VERSION_INIT, __pyx_obj_dict_version = __PYX_DICT_VERSION_INIT;
if (unlikely(!__Pyx_object_dict_version_matches(((PyObject *)__pyx_v_self), __pyx_tp_dict_version, __pyx_obj_dict_version))) {
PY_UINT64_T __pyx_type_dict_guard = __Pyx_get_tp_dict_version(((PyObject *)__pyx_v_self));
#endif
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_reshape_probs); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!PyCFunction_Check(__pyx_t_1) || (PyCFunction_GET_FUNCTION(__pyx_t_1) != (PyCFunction)(void*)__pyx_pw_21kernel_prob_reshaping_14KernelReshaper_3reshape_probs)) {
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_t_1);
__pyx_t_3 = __pyx_t_1; __pyx_t_4 = NULL;
__pyx_t_5 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
__pyx_t_5 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[3] = {__pyx_t_4, ((PyObject *)__pyx_v_cat_probs), ((PyObject *)__pyx_v_descriptors)};
__pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_GOTREF(__pyx_t_2);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[3] = {__pyx_t_4, ((PyObject *)__pyx_v_cat_probs), ((PyObject *)__pyx_v_descriptors)};
__pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_GOTREF(__pyx_t_2);
} else
#endif
{
__pyx_t_6 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (__pyx_t_4) {
__Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __pyx_t_4 = NULL;
}
__Pyx_INCREF(((PyObject *)__pyx_v_cat_probs));
__Pyx_GIVEREF(((PyObject *)__pyx_v_cat_probs));
PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_5, ((PyObject *)__pyx_v_cat_probs));
__Pyx_INCREF(((PyObject *)__pyx_v_descriptors));
__Pyx_GIVEREF(((PyObject *)__pyx_v_descriptors));
PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_5, ((PyObject *)__pyx_v_descriptors));
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
goto __pyx_L0;
}
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS
__pyx_tp_dict_version = __Pyx_get_tp_dict_version(((PyObject *)__pyx_v_self));
__pyx_obj_dict_version = __Pyx_get_object_dict_version(((PyObject *)__pyx_v_self));
if (unlikely(__pyx_type_dict_guard != __pyx_tp_dict_version)) {
__pyx_tp_dict_version = __pyx_obj_dict_version = __PYX_DICT_VERSION_INIT;
}
#endif
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS
}
#endif
}
/* "kernel_prob_reshaping.pyx":70
* cpdef reshape_probs(self, np.ndarray cat_probs, np.ndarray descriptors):
*
* self.num_samples = cat_probs.shape[0] # <<<<<<<<<<<<<<
* self.num_obs = cat_probs.shape[1]
* self.num_kernels = cat_probs.shape[2]
*/
__pyx_v_self->num_samples = (__pyx_v_cat_probs->dimensions[0]);
/* "kernel_prob_reshaping.pyx":71
*
* self.num_samples = cat_probs.shape[0]
* self.num_obs = cat_probs.shape[1] # <<<<<<<<<<<<<<
* self.num_kernels = cat_probs.shape[2]
* self.num_descriptors = descriptors.shape[1]
*/
__pyx_v_self->num_obs = (__pyx_v_cat_probs->dimensions[1]);
/* "kernel_prob_reshaping.pyx":72
* self.num_samples = cat_probs.shape[0]
* self.num_obs = cat_probs.shape[1]
* self.num_kernels = cat_probs.shape[2] # <<<<<<<<<<<<<<
* self.num_descriptors = descriptors.shape[1]
*
*/
__pyx_v_self->num_kernels = (__pyx_v_cat_probs->dimensions[2]);
/* "kernel_prob_reshaping.pyx":73
* self.num_obs = cat_probs.shape[1]
* self.num_kernels = cat_probs.shape[2]
* self.num_descriptors = descriptors.shape[1] # <<<<<<<<<<<<<<
*
* self.np_recomputed_probs = np.zeros((self.num_samples, self.num_obs, self.num_kernels))
*/
__pyx_v_self->num_descriptors = (__pyx_v_descriptors->dimensions[1]);
/* "kernel_prob_reshaping.pyx":75
* self.num_descriptors = descriptors.shape[1]
*
* self.np_recomputed_probs = np.zeros((self.num_samples, self.num_obs, self.num_kernels)) # <<<<<<<<<<<<<<
* self.np_all_distances = np.zeros((self.num_samples, self.num_obs, self.num_kernels))
*
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 75, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 75, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->num_samples); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 75, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_self->num_obs); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 75, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_self->num_kernels); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 75, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 75, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_6 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_7) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_7);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 75, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 75, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->np_recomputed_probs);
__Pyx_DECREF(((PyObject *)__pyx_v_self->np_recomputed_probs));
__pyx_v_self->np_recomputed_probs = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* "kernel_prob_reshaping.pyx":76
*
* self.np_recomputed_probs = np.zeros((self.num_samples, self.num_obs, self.num_kernels))
* self.np_all_distances = np.zeros((self.num_samples, self.num_obs, self.num_kernels)) # <<<<<<<<<<<<<<
*
* cdef double [:, :, :] cat_probs_memview = cat_probs
*/
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 76, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 76, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_self->num_samples); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 76, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_self->num_obs); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 76, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_self->num_kernels); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 76, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 76, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_6);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_6 = 0;
__pyx_t_6 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_1 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_6, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_2);
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 76, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 76, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->np_all_distances);
__Pyx_DECREF(((PyObject *)__pyx_v_self->np_all_distances));
__pyx_v_self->np_all_distances = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
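/* Note: both scratch buffers (np_recomputed_probs, np_all_distances) are
 * re-created as fresh float64 zero arrays of shape
 * (num_samples, num_obs, num_kernels) on every call, so results from a
 * previous call are never reused or aliased. */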
/* "kernel_prob_reshaping.pyx":78
* self.np_all_distances = np.zeros((self.num_samples, self.num_obs, self.num_kernels))
*
* cdef double [:, :, :] cat_probs_memview = cat_probs # <<<<<<<<<<<<<<
* cdef double [:, :] descriptors_memview = descriptors
*
*/
__pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(((PyObject *)__pyx_v_cat_probs), PyBUF_WRITABLE); if (unlikely(!__pyx_t_8.memview)) __PYX_ERR(0, 78, __pyx_L1_error)
__pyx_v_cat_probs_memview = __pyx_t_8;
__pyx_t_8.memview = NULL;
__pyx_t_8.data = NULL;
/* "kernel_prob_reshaping.pyx":79
*
* cdef double [:, :, :] cat_probs_memview = cat_probs
* cdef double [:, :] descriptors_memview = descriptors # <<<<<<<<<<<<<<
*
* reshaped_probs = self._reshape_probs(cat_probs_memview, descriptors_memview)
*/
__pyx_t_9 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(((PyObject *)__pyx_v_descriptors), PyBUF_WRITABLE); if (unlikely(!__pyx_t_9.memview)) __PYX_ERR(0, 79, __pyx_L1_error)
__pyx_v_descriptors_memview = __pyx_t_9;
__pyx_t_9.memview = NULL;
__pyx_t_9.data = NULL;
/* "kernel_prob_reshaping.pyx":81
* cdef double [:, :] descriptors_memview = descriptors
*
* reshaped_probs = self._reshape_probs(cat_probs_memview, descriptors_memview) # <<<<<<<<<<<<<<
* return np.array(reshaped_probs)
*
*/
__pyx_t_8 = ((struct __pyx_vtabstruct_21kernel_prob_reshaping_KernelReshaper *)__pyx_v_self->__pyx_vtab)->_reshape_probs(__pyx_v_self, __pyx_v_cat_probs_memview, __pyx_v_descriptors_memview); if (unlikely(!__pyx_t_8.memview)) __PYX_ERR(0, 81, __pyx_L1_error)
__pyx_t_1 = __pyx_memoryview_fromslice(__pyx_t_8, 3, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__PYX_XDEC_MEMVIEW(&__pyx_t_8, 1);
__pyx_t_8.memview = NULL;
__pyx_t_8.data = NULL;
__pyx_v_reshaped_probs = __pyx_t_1;
__pyx_t_1 = 0;
/* "kernel_prob_reshaping.pyx":82
*
* reshaped_probs = self._reshape_probs(cat_probs_memview, descriptors_memview)
* return np.array(reshaped_probs) # <<<<<<<<<<<<<<
*
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 82, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_array); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 82, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_1 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_7, __pyx_v_reshaped_probs) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_reshaped_probs);
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 82, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "kernel_prob_reshaping.pyx":68
*
*
* cpdef reshape_probs(self, np.ndarray cat_probs, np.ndarray descriptors): # <<<<<<<<<<<<<<
*
* self.num_samples = cat_probs.shape[0]
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__PYX_XDEC_MEMVIEW(&__pyx_t_8, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_9, 1);
__Pyx_AddTraceback("kernel_prob_reshaping.KernelReshaper.reshape_probs", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_cat_probs_memview, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_descriptors_memview, 1);
__Pyx_XDECREF(__pyx_v_reshaped_probs);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_21kernel_prob_reshaping_14KernelReshaper_3reshape_probs(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_pw_21kernel_prob_reshaping_14KernelReshaper_3reshape_probs(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_cat_probs = 0;
PyArrayObject *__pyx_v_descriptors = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("reshape_probs (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_cat_probs,&__pyx_n_s_descriptors,0};
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_cat_probs)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_descriptors)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("reshape_probs", 1, 2, 2, 1); __PYX_ERR(0, 68, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "reshape_probs") < 0)) __PYX_ERR(0, 68, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_cat_probs = ((PyArrayObject *)values[0]);
__pyx_v_descriptors = ((PyArrayObject *)values[1]);
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("reshape_probs", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 68, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("kernel_prob_reshaping.KernelReshaper.reshape_probs", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_cat_probs), __pyx_ptype_5numpy_ndarray, 1, "cat_probs", 0))) __PYX_ERR(0, 68, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_descriptors), __pyx_ptype_5numpy_ndarray, 1, "descriptors", 0))) __PYX_ERR(0, 68, __pyx_L1_error)
__pyx_r = __pyx_pf_21kernel_prob_reshaping_14KernelReshaper_2reshape_probs(((struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *)__pyx_v_self), __pyx_v_cat_probs, __pyx_v_descriptors);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_21kernel_prob_reshaping_14KernelReshaper_2reshape_probs(struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *__pyx_v_self, PyArrayObject *__pyx_v_cat_probs, PyArrayObject *__pyx_v_descriptors) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("reshape_probs", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_f_21kernel_prob_reshaping_14KernelReshaper_reshape_probs(__pyx_v_self, __pyx_v_cat_probs, __pyx_v_descriptors, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("kernel_prob_reshaping.KernelReshaper.reshape_probs", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
static PyObject *__pyx_pw_21kernel_prob_reshaping_14KernelReshaper_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw_21kernel_prob_reshaping_14KernelReshaper_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf_21kernel_prob_reshaping_14KernelReshaper_4__reduce_cython__(((struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_21kernel_prob_reshaping_14KernelReshaper_4__reduce_cython__(struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *__pyx_v_self) {
PyObject *__pyx_v_state = 0;
PyObject *__pyx_v__dict = 0;
int __pyx_v_use_setstate;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":5
* cdef object _dict
* cdef bint use_setstate
* state = (self.np_all_distances, self.np_recomputed_probs, self.num_descriptors, self.num_kernels, self.num_obs, self.num_samples) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->num_descriptors); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->num_kernels); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_self->num_obs); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_self->num_samples); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(6); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_self->np_all_distances));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self->np_all_distances));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_self->np_all_distances));
__Pyx_INCREF(((PyObject *)__pyx_v_self->np_recomputed_probs));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self->np_recomputed_probs));
PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_self->np_recomputed_probs));
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 4, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 5, __pyx_t_4);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_v_state = ((PyObject*)__pyx_t_5);
__pyx_t_5 = 0;
/* "(tree fragment)":6
* cdef bint use_setstate
* state = (self.np_all_distances, self.np_recomputed_probs, self.num_descriptors, self.num_kernels, self.num_obs, self.num_samples)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_5 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_v__dict = __pyx_t_5;
__pyx_t_5 = 0;
/* "(tree fragment)":7
* state = (self.np_all_distances, self.np_recomputed_probs, self.num_descriptors, self.num_kernels, self.num_obs, self.num_samples)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_6 = (__pyx_v__dict != Py_None);
__pyx_t_7 = (__pyx_t_6 != 0);
if (__pyx_t_7) {
/* "(tree fragment)":8
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v__dict);
__pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
__pyx_t_4 = 0;
/* "(tree fragment)":9
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.np_all_distances is not None or self.np_recomputed_probs is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":7
* state = (self.np_all_distances, self.np_recomputed_probs, self.num_descriptors, self.num_kernels, self.num_obs, self.num_samples)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":11
* use_setstate = True
* else:
* use_setstate = self.np_all_distances is not None or self.np_recomputed_probs is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_KernelReshaper, (type(self), 0x9c5b774, None), state
*/
/*else*/ {
__pyx_t_6 = (((PyObject *)__pyx_v_self->np_all_distances) != Py_None);
__pyx_t_8 = (__pyx_t_6 != 0);
if (!__pyx_t_8) {
} else {
__pyx_t_7 = __pyx_t_8;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_8 = (((PyObject *)__pyx_v_self->np_recomputed_probs) != Py_None);
__pyx_t_6 = (__pyx_t_8 != 0);
__pyx_t_7 = __pyx_t_6;
__pyx_L4_bool_binop_done:;
__pyx_v_use_setstate = __pyx_t_7;
}
__pyx_L3:;
/* "(tree fragment)":12
* else:
* use_setstate = self.np_all_distances is not None or self.np_recomputed_probs is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_KernelReshaper, (type(self), 0x9c5b774, None), state
* else:
*/
__pyx_t_7 = (__pyx_v_use_setstate != 0);
if (__pyx_t_7) {
/* "(tree fragment)":13
* use_setstate = self.np_all_distances is not None or self.np_recomputed_probs is not None
* if use_setstate:
* return __pyx_unpickle_KernelReshaper, (type(self), 0x9c5b774, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_KernelReshaper, (type(self), 0x9c5b774, state)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_KernelReshaper); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_163952500);
__Pyx_GIVEREF(__pyx_int_163952500);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_163952500);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_5, 2, Py_None);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_state);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "(tree fragment)":12
* else:
* use_setstate = self.np_all_distances is not None or self.np_recomputed_probs is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_KernelReshaper, (type(self), 0x9c5b774, None), state
* else:
*/
}
/* "(tree fragment)":15
* return __pyx_unpickle_KernelReshaper, (type(self), 0x9c5b774, None), state
* else:
* return __pyx_unpickle_KernelReshaper, (type(self), 0x9c5b774, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_KernelReshaper__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_pyx_unpickle_KernelReshaper); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_163952500);
__Pyx_GIVEREF(__pyx_int_163952500);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_163952500);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5);
__pyx_t_3 = 0;
__pyx_t_5 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("kernel_prob_reshaping.KernelReshaper.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
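/* Note: the checksum literal 0x9c5b774 used above equals 163952500, i.e.
 * the cached __pyx_int_163952500 declared earlier; __pyx_unpickle_KernelReshaper
 * (below) rejects pickles whose checksum does not match this attribute
 * layout (np_all_distances, np_recomputed_probs, num_descriptors,
 * num_kernels, num_obs, num_samples). */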
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_KernelReshaper, (type(self), 0x9c5b774, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_KernelReshaper__set_state(self, __pyx_state)
*/
/* Python wrapper */
static PyObject *__pyx_pw_21kernel_prob_reshaping_14KernelReshaper_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw_21kernel_prob_reshaping_14KernelReshaper_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf_21kernel_prob_reshaping_14KernelReshaper_6__setstate_cython__(((struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_21kernel_prob_reshaping_14KernelReshaper_6__setstate_cython__(struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":17
* return __pyx_unpickle_KernelReshaper, (type(self), 0x9c5b774, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_KernelReshaper__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
__pyx_t_1 = __pyx_f_21kernel_prob_reshaping___pyx_unpickle_KernelReshaper__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_KernelReshaper, (type(self), 0x9c5b774, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_KernelReshaper__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("kernel_prob_reshaping.KernelReshaper.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __pyx_unpickle_KernelReshaper(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* Python wrapper */
static PyObject *__pyx_pw_21kernel_prob_reshaping_1__pyx_unpickle_KernelReshaper(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_21kernel_prob_reshaping_1__pyx_unpickle_KernelReshaper = {"__pyx_unpickle_KernelReshaper", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_21kernel_prob_reshaping_1__pyx_unpickle_KernelReshaper, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_21kernel_prob_reshaping_1__pyx_unpickle_KernelReshaper(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_KernelReshaper (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_KernelReshaper", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_KernelReshaper", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_KernelReshaper") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_KernelReshaper", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("kernel_prob_reshaping.__pyx_unpickle_KernelReshaper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_21kernel_prob_reshaping___pyx_unpickle_KernelReshaper(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
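/* The wrapper above is Cython's standard METH_VARARGS|METH_KEYWORDS
 * argument-unpacking preamble: positional arguments are copied into
 * values[], keyword arguments are matched against __pyx_pyargnames, and any
 * arity or keyword mismatch funnels into __Pyx_RaiseArgtupleInvalid /
 * __Pyx_ParseOptionalKeywords, which raise the usual TypeError. Only once
 * all three arguments are bound (and __pyx_checksum coerced to a C long)
 * does control reach the typed implementation below. */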
static PyObject *__pyx_pf_21kernel_prob_reshaping___pyx_unpickle_KernelReshaper(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_KernelReshaper", 0);
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0x9c5b774: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x9c5b774 = (np_all_distances, np_recomputed_probs, num_descriptors, num_kernels, num_obs, num_samples))" % __pyx_checksum)
*/
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0x9c5b774) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
* cdef object __pyx_result
* if __pyx_checksum != 0x9c5b774:
* from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x9c5b774 = (np_all_distances, np_recomputed_probs, num_descriptors, num_kernels, num_obs, num_samples))" % __pyx_checksum)
* __pyx_result = KernelReshaper.__new__(__pyx_type)
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, -1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
* if __pyx_checksum != 0x9c5b774:
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x9c5b774 = (np_all_distances, np_recomputed_probs, num_descriptors, num_kernels, num_obs, num_samples))" % __pyx_checksum) # <<<<<<<<<<<<<<
* __pyx_result = KernelReshaper.__new__(__pyx_type)
* if __pyx_state is not None:
*/
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0x9c, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 6, __pyx_L1_error)
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0x9c5b774: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x9c5b774 = (np_all_distances, np_recomputed_probs, num_descriptors, num_kernels, num_obs, num_samples))" % __pyx_checksum)
*/
}
/* "(tree fragment)":7
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x9c5b774 = (np_all_distances, np_recomputed_probs, num_descriptors, num_kernels, num_obs, num_samples))" % __pyx_checksum)
* __pyx_result = KernelReshaper.__new__(__pyx_type) # <<<<<<<<<<<<<<
* if __pyx_state is not None:
* __pyx_unpickle_KernelReshaper__set_state(<KernelReshaper> __pyx_result, __pyx_state)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_ptype_21kernel_prob_reshaping_KernelReshaper), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x9c5b774 = (np_all_distances, np_recomputed_probs, num_descriptors, num_kernels, num_obs, num_samples))" % __pyx_checksum)
* __pyx_result = KernelReshaper.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_KernelReshaper__set_state(<KernelReshaper> __pyx_result, __pyx_state)
* return __pyx_result
*/
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
* __pyx_result = KernelReshaper.__new__(__pyx_type)
* if __pyx_state is not None:
* __pyx_unpickle_KernelReshaper__set_state(<KernelReshaper> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
* return __pyx_result
* cdef __pyx_unpickle_KernelReshaper__set_state(KernelReshaper __pyx_result, tuple __pyx_state):
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_f_21kernel_prob_reshaping___pyx_unpickle_KernelReshaper__set_state(((struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0x9c5b774 = (np_all_distances, np_recomputed_probs, num_descriptors, num_kernels, num_obs, num_samples))" % __pyx_checksum)
* __pyx_result = KernelReshaper.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_KernelReshaper__set_state(<KernelReshaper> __pyx_result, __pyx_state)
* return __pyx_result
*/
}
/* "(tree fragment)":10
* if __pyx_state is not None:
* __pyx_unpickle_KernelReshaper__set_state(<KernelReshaper> __pyx_result, __pyx_state)
* return __pyx_result # <<<<<<<<<<<<<<
* cdef __pyx_unpickle_KernelReshaper__set_state(KernelReshaper __pyx_result, tuple __pyx_state):
* __pyx_result.np_all_distances = __pyx_state[0]; __pyx_result.np_recomputed_probs = __pyx_state[1]; __pyx_result.num_descriptors = __pyx_state[2]; __pyx_result.num_kernels = __pyx_state[3]; __pyx_result.num_obs = __pyx_state[4]; __pyx_result.num_samples = __pyx_state[5]
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
* def __pyx_unpickle_KernelReshaper(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("kernel_prob_reshaping.__pyx_unpickle_KernelReshaper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
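/* __pyx_unpickle_KernelReshaper__set_state, below, copies the six state
 * fields back onto the freshly created instance, type-checking the two
 * ndarray slots along the way. When the state tuple carries a seventh
 * element, it is merged into the instance __dict__ (guarded by
 * len(state) > 6 and hasattr(result, '__dict__')) so dynamically added
 * attributes also survive the pickle round trip. The repeated Py_None
 * checks before each subscript exist because the argument is declared as
 * `tuple`, which in Cython admits None unless `not None` is specified. */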
/* "(tree fragment)":11
* __pyx_unpickle_KernelReshaper__set_state(<KernelReshaper> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_KernelReshaper__set_state(KernelReshaper __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.np_all_distances = __pyx_state[0]; __pyx_result.np_recomputed_probs = __pyx_state[1]; __pyx_result.num_descriptors = __pyx_state[2]; __pyx_result.num_kernels = __pyx_state[3]; __pyx_result.num_obs = __pyx_state[4]; __pyx_result.num_samples = __pyx_state[5]
* if len(__pyx_state) > 6 and hasattr(__pyx_result, '__dict__'):
*/
static PyObject *__pyx_f_21kernel_prob_reshaping___pyx_unpickle_KernelReshaper__set_state(struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_KernelReshaper__set_state", 0);
/* "(tree fragment)":12
* return __pyx_result
* cdef __pyx_unpickle_KernelReshaper__set_state(KernelReshaper __pyx_result, tuple __pyx_state):
* __pyx_result.np_all_distances = __pyx_state[0]; __pyx_result.np_recomputed_probs = __pyx_state[1]; __pyx_result.num_descriptors = __pyx_state[2]; __pyx_result.num_kernels = __pyx_state[3]; __pyx_result.num_obs = __pyx_state[4]; __pyx_result.num_samples = __pyx_state[5] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 6 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[6])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->np_all_distances);
__Pyx_DECREF(((PyObject *)__pyx_v___pyx_result->np_all_distances));
__pyx_v___pyx_result->np_all_distances = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->np_recomputed_probs);
__Pyx_DECREF(((PyObject *)__pyx_v___pyx_result->np_recomputed_probs));
__pyx_v___pyx_result->np_recomputed_probs = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->num_descriptors = __pyx_t_2;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 3, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->num_kernels = __pyx_t_2;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 4, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->num_obs = __pyx_t_2;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 5, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->num_samples = __pyx_t_2;
/* "(tree fragment)":13
* cdef __pyx_unpickle_KernelReshaper__set_state(KernelReshaper __pyx_result, tuple __pyx_state):
* __pyx_result.np_all_distances = __pyx_state[0]; __pyx_result.np_recomputed_probs = __pyx_state[1]; __pyx_result.num_descriptors = __pyx_state[2]; __pyx_result.num_kernels = __pyx_state[3]; __pyx_result.num_obs = __pyx_state[4]; __pyx_result.num_samples = __pyx_state[5]
* if len(__pyx_state) > 6 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[6])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 13, __pyx_L1_error)
}
__pyx_t_4 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_5 = ((__pyx_t_4 > 6) != 0);
if (__pyx_t_5) {
} else {
__pyx_t_3 = __pyx_t_5;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_5 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_6 = (__pyx_t_5 != 0);
__pyx_t_3 = __pyx_t_6;
__pyx_L4_bool_binop_done:;
if (__pyx_t_3) {
/* "(tree fragment)":14
* __pyx_result.np_all_distances = __pyx_state[0]; __pyx_result.np_recomputed_probs = __pyx_state[1]; __pyx_result.num_descriptors = __pyx_state[2]; __pyx_result.num_kernels = __pyx_state[3]; __pyx_result.num_obs = __pyx_state[4]; __pyx_result.num_samples = __pyx_state[5]
* if len(__pyx_state) > 6 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[6]) # <<<<<<<<<<<<<<
*/
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_update); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 14, __pyx_L1_error)
}
__pyx_t_7 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 6, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_8);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_8, function);
}
}
__pyx_t_1 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_8, __pyx_t_9, __pyx_t_7) : __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_7);
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_KernelReshaper__set_state(KernelReshaper __pyx_result, tuple __pyx_state):
* __pyx_result.np_all_distances = __pyx_state[0]; __pyx_result.np_recomputed_probs = __pyx_state[1]; __pyx_result.num_descriptors = __pyx_state[2]; __pyx_result.num_kernels = __pyx_state[3]; __pyx_result.num_obs = __pyx_state[4]; __pyx_result.num_samples = __pyx_state[5]
* if len(__pyx_state) > 6 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[6])
*/
}
/* "(tree fragment)":11
* __pyx_unpickle_KernelReshaper__set_state(<KernelReshaper> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_KernelReshaper__set_state(KernelReshaper __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.np_all_distances = __pyx_state[0]; __pyx_result.np_recomputed_probs = __pyx_state[1]; __pyx_result.num_descriptors = __pyx_state[2]; __pyx_result.num_kernels = __pyx_state[3]; __pyx_result.num_obs = __pyx_state[4]; __pyx_result.num_samples = __pyx_state[5]
* if len(__pyx_state) > 6 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("kernel_prob_reshaping.__pyx_unpickle_KernelReshaper__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
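/* Everything from here to the View.MemoryView section at the bottom of the
 * file is utility code inlined from numpy/__init__.pxd rather than from
 * kernel_prob_reshaping.pyx itself. The PyArray_MultiIterNew1..5 wrappers
 * simply forward to numpy's variadic C-API PyArray_MultiIterNew with the
 * argument count baked in, giving `cimport numpy` code a typed way to set
 * up broadcasting over one to five arrays. A usage sketch (assuming the
 * conventional `cimport numpy as cnp` in a .pyx file):
 *
 *     it = cnp.PyArray_MultiIterNew2(a, b)   # broadcast a and b together
 */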
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":734
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":735
*
* cdef inline object PyArray_MultiIterNew1(a):
* return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew2(a, b):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 735, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":734
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":737
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":738
*
* cdef inline object PyArray_MultiIterNew2(a, b):
* return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 738, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":737
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":740
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":741
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 741, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":740
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":743
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":744
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 744, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":743
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":746
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":747
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<<
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 747, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":746
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
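/* PyDataType_SHAPE, below, maps a structured dtype's subarray to a shape
 * tuple: when the descriptor has a subarray, its (borrowed) shape tuple is
 * INCREF'd and returned as an owned reference; for plain scalar dtypes the
 * interned empty tuple is returned instead. */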
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":749
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("PyDataType_SHAPE", 0);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":750
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<<
* return <tuple>d.subarray.shape
* else:
*/
__pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0);
if (__pyx_t_1) {
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":751
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape # <<<<<<<<<<<<<<
* else:
* return ()
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape));
__pyx_r = ((PyObject*)__pyx_v_d->subarray->shape);
goto __pyx_L0;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":750
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<<
* return <tuple>d.subarray.shape
* else:
*/
}
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":753
* return <tuple>d.subarray.shape
* else:
* return () # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_empty_tuple);
__pyx_r = __pyx_empty_tuple;
goto __pyx_L0;
}
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":749
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
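/* set_array_base, below, installs `base` as the owner of an ndarray's
 * buffer. PyArray_SetBaseObject steals a reference, hence the explicit
 * Py_INCREF beforehand; its return code is deliberately discarded with the
 * (void) cast, matching the `void` Cython signature. */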
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":868
* int _import_umath() except -1
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base)
*/
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("set_array_base", 0);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":869
*
* cdef inline void set_array_base(ndarray arr, object base):
* Py_INCREF(base) # important to do this before stealing the reference below! # <<<<<<<<<<<<<<
* PyArray_SetBaseObject(arr, base)
*
*/
Py_INCREF(__pyx_v_base);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":870
* cdef inline void set_array_base(ndarray arr, object base):
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<<
*
* cdef inline object get_array_base(ndarray arr):
*/
(void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base));
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":868
* int _import_umath() except -1
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
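/* get_array_base, below, is the inverse lookup: PyArray_BASE returns a
 * borrowed pointer (or NULL when the array owns its own data), which is
 * mapped to None or INCREF'd into an owned Python reference. */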
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":872
* PyArray_SetBaseObject(arr, base)
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* base = PyArray_BASE(arr)
* if base is NULL:
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
PyObject *__pyx_v_base;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("get_array_base", 0);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":873
*
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr) # <<<<<<<<<<<<<<
* if base is NULL:
* return None
*/
__pyx_v_base = PyArray_BASE(__pyx_v_arr);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":874
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr)
* if base is NULL: # <<<<<<<<<<<<<<
* return None
* return <object>base
*/
__pyx_t_1 = ((__pyx_v_base == NULL) != 0);
if (__pyx_t_1) {
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":875
* base = PyArray_BASE(arr)
* if base is NULL:
* return None # <<<<<<<<<<<<<<
* return <object>base
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":874
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr)
* if base is NULL: # <<<<<<<<<<<<<<
* return None
* return <object>base
*/
}
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":876
* if base is NULL:
* return None
* return <object>base # <<<<<<<<<<<<<<
*
* # Versions of the import_* functions which are more suitable for
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_base));
__pyx_r = ((PyObject *)__pyx_v_base);
goto __pyx_L0;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":872
* PyArray_SetBaseObject(arr, base)
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* base = PyArray_BASE(arr)
* if base is NULL:
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
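/* import_array / import_umath / import_ufunc, below, are the
 * exception-raising variants of numpy's C-API initializers intended for
 * Cython code: each wraps the underlying _import_array()/_import_umath()
 * call in a try/except and converts failure into an ImportError, with
 * `except -1` semantics so module init code can simply call them and let
 * the error propagate. Note that, as generated here, import_ufunc also
 * just calls _import_umath(). */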
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":880
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* __pyx_import_array()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("import_array", 0);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":881
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* __pyx_import_array()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":882
* cdef inline int import_array() except -1:
* try:
* __pyx_import_array() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import")
*/
__pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 882, __pyx_L3_error)
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":881
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* __pyx_import_array()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":883
* try:
* __pyx_import_array()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.multiarray failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(2, 883, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":884
* __pyx_import_array()
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_umath() except -1:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 884, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(2, 884, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":881
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* __pyx_import_array()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":880
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* __pyx_import_array()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":886
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("import_umath", 0);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":887
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":888
* cdef inline int import_umath() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 888, __pyx_L3_error)
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":887
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":889
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(2, 889, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":890
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_ufunc() except -1:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 890, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(2, 890, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":887
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":886
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":892
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("import_ufunc", 0);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":893
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":894
* cdef inline int import_ufunc() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 894, __pyx_L3_error)
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":893
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":895
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(2, 895, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":896
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef extern from *:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 896, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(2, 896, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":893
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":892
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
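/* The remainder of the file is Cython's View.MemoryView utility code: the
 * `array` type below (cython.array) is the buffer-owning helper used when a
 * memoryview needs storage of its own. Its __cinit__ validates the shape
 * tuple, itemsize, and struct-format string, then allocates a single
 * PyObject_Malloc block of sizeof(Py_ssize_t) * ndim * 2 that holds both
 * _shape and _strides back to back. */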
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* Python wrapper */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_shape = 0;
Py_ssize_t __pyx_v_itemsize;
PyObject *__pyx_v_format = 0;
PyObject *__pyx_v_mode = 0;
int __pyx_v_allocate_buffer;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
PyObject* values[5] = {0,0,0,0,0};
values[3] = ((PyObject *)__pyx_n_s_c);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode);
if (value) { values[3] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 4:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer);
if (value) { values[4] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_shape = ((PyObject*)values[0]);
__pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_v_format = values[2];
__pyx_v_mode = values[3];
if (values[4]) {
__pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error)
} else {
/* "View.MemoryView":123
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
* mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
*
* cdef int idx
*/
__pyx_v_allocate_buffer = ((int)1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error)
if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error)
}
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
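/* In the wrapper above, values[3] is pre-seeded with the interned string
 * "c" so `mode` defaults to "c", and allocate_buffer defaults to true when
 * the fifth argument is absent. The `format not None` declaration is
 * enforced after unpacking via the explicit Py_None check just before the
 * implementation call, and `shape` is type-checked against tuple with
 * __Pyx_ArgTypeTest. */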
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
int __pyx_v_idx;
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_dim;
PyObject **__pyx_v_p;
char __pyx_v_order;
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
int __pyx_t_8;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
Py_ssize_t __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
__Pyx_INCREF(__pyx_v_format);
/* "View.MemoryView":129
* cdef PyObject **p
*
* self.ndim = <int> len(shape) # <<<<<<<<<<<<<<
* self.itemsize = itemsize
*
*/
if (unlikely(__pyx_v_shape == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 129, __pyx_L1_error)
}
__pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error)
__pyx_v_self->ndim = ((int)__pyx_t_1);
/* "View.MemoryView":130
*
* self.ndim = <int> len(shape)
* self.itemsize = itemsize # <<<<<<<<<<<<<<
*
* if not self.ndim:
*/
__pyx_v_self->itemsize = __pyx_v_itemsize;
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
__pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 133, __pyx_L1_error)
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
}
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
__pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 136, __pyx_L1_error)
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
}
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
__pyx_t_2 = PyBytes_Check(__pyx_v_format);
__pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":139
*
* if not isinstance(format, bytes):
* format = format.encode('ASCII') # <<<<<<<<<<<<<<
* self._format = format # keep a reference to the byte string
* self.format = self._format
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII);
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
}
/* "View.MemoryView":140
* if not isinstance(format, bytes):
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
* self.format = self._format
*
*/
if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error)
__pyx_t_3 = __pyx_v_format;
__Pyx_INCREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__Pyx_GOTREF(__pyx_v_self->_format);
__Pyx_DECREF(__pyx_v_self->_format);
__pyx_v_self->_format = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":141
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
* self.format = self._format # <<<<<<<<<<<<<<
*
*
*/
if (unlikely(__pyx_v_self->_format == Py_None)) {
PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
__PYX_ERR(1, 141, __pyx_L1_error)
}
__pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error)
__pyx_v_self->format = __pyx_t_7;
/* "View.MemoryView":144
*
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
* self._strides = self._shape + self.ndim
*
*/
__pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
/* "View.MemoryView":145
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
* self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
*
* if not self._shape:
*/
__pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 148, __pyx_L1_error)
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
}
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
__pyx_t_8 = 0;
__pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
for (;;) {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_9;
__pyx_v_idx = __pyx_t_8;
__pyx_t_8 = (__pyx_t_8 + 1);
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
__pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":153
* for idx, dim in enumerate(shape):
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<<
* self._shape[idx] = dim
*
*/
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6);
__pyx_t_5 = 0;
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 153, __pyx_L1_error)
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
}
/* "View.MemoryView":154
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim # <<<<<<<<<<<<<<
*
* cdef char order
*/
(__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
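/* Note: the loop above is the expansion of `for idx, dim in enumerate(shape)`.
 * Each tuple item is converted with __Pyx_PyIndex_AsSsize_t (any object
 * supporting __index__), non-positive extents raise ValueError, and each
 * validated extent is stored into the C-level _shape array. */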
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":158
* cdef char order
* if mode == 'fortran':
* order = b'F' # <<<<<<<<<<<<<<
* self.mode = u'fortran'
* elif mode == 'c':
*/
__pyx_v_order = 'F';
/* "View.MemoryView":159
* if mode == 'fortran':
* order = b'F'
* self.mode = u'fortran' # <<<<<<<<<<<<<<
* elif mode == 'c':
* order = b'C'
*/
__Pyx_INCREF(__pyx_n_u_fortran);
__Pyx_GIVEREF(__pyx_n_u_fortran);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_fortran;
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
goto __pyx_L10;
}
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error)
if (likely(__pyx_t_4)) {
/* "View.MemoryView":161
* self.mode = u'fortran'
* elif mode == 'c':
* order = b'C' # <<<<<<<<<<<<<<
* self.mode = u'c'
* else:
*/
__pyx_v_order = 'C';
/* "View.MemoryView":162
* elif mode == 'c':
* order = b'C'
* self.mode = u'c' # <<<<<<<<<<<<<<
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*/
__Pyx_INCREF(__pyx_n_u_c);
__Pyx_GIVEREF(__pyx_n_u_c);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_c;
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
goto __pyx_L10;
}
/* "View.MemoryView":164
* self.mode = u'c'
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<<
*
* self.len = fill_contig_strides_array(self._shape, self._strides,
*/
/*else*/ {
__pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 164, __pyx_L1_error)
}
__pyx_L10:;
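/* Note: `order` is the single byte ('C' or 'F') handed to
 * fill_contig_strides_array below, while `mode` keeps the user-facing unicode
 * string; any other mode string raises ValueError before strides are built. */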
/* "View.MemoryView":166
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*
* self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<<
* itemsize, self.ndim, order)
*
*/
__pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
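/* Illustrative example (not part of the generated code): for shape (2, 3),
 * itemsize 8 and order 'C', contiguous strides come out as (24, 8) and the
 * returned len is 2*3*8 = 48; with order 'F' the strides are (8, 16). */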
/* "View.MemoryView":169
* itemsize, self.ndim, order)
*
* self.free_data = allocate_buffer # <<<<<<<<<<<<<<
* self.dtype_is_object = format == b'O'
* if allocate_buffer:
*/
__pyx_v_self->free_data = __pyx_v_allocate_buffer;
/* "View.MemoryView":170
*
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
* if allocate_buffer:
*
*/
__pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_4;
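/* Note: dtype_is_object is set by comparing the format bytes against b'O',
 * the PEP 3118 format code for a Python object pointer. Object buffers need
 * the per-element refcounting seen below and again in __dealloc__. */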
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_4 = (__pyx_v_allocate_buffer != 0);
if (__pyx_t_4) {
/* "View.MemoryView":174
*
*
* self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<<
* if not self.data:
* raise MemoryError("unable to allocate array data.")
*/
__pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 176, __pyx_L1_error)
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
}
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
__pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_4) {
/* "View.MemoryView":179
*
* if self.dtype_is_object:
* p = <PyObject **> self.data # <<<<<<<<<<<<<<
* for i in range(self.len / itemsize):
* p[i] = Py_None
*/
__pyx_v_p = ((PyObject **)__pyx_v_self->data);
/* "View.MemoryView":180
* if self.dtype_is_object:
* p = <PyObject **> self.data
* for i in range(self.len / itemsize): # <<<<<<<<<<<<<<
* p[i] = Py_None
* Py_INCREF(Py_None)
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 180, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 180, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize);
__pyx_t_9 = __pyx_t_1;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) {
__pyx_v_i = __pyx_t_11;
/* "View.MemoryView":181
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
* p[i] = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
(__pyx_v_p[__pyx_v_i]) = Py_None;
/* "View.MemoryView":182
* for i in range(self.len / itemsize):
* p[i] = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
Py_INCREF(Py_None);
}
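/* Note: the guarded division above (explicit ZeroDivisionError and
 * OverflowError checks before __Pyx_div_Py_ssize_t) computes the element
 * count self.len / itemsize. Each slot of an object buffer is seeded with
 * Py_None plus one incref, so the slice refcounting in __dealloc__ can later
 * decref every element without touching uninitialized memory. */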
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
}
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_format);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":186
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1 # <<<<<<<<<<<<<<
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = -1;
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error)
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":188
* cdef int bufmode = -1
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
goto __pyx_L3;
}
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error)
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":190
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
*/
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
}
__pyx_L3:;
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 192, __pyx_L1_error)
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
}
/* "View.MemoryView":193
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data # <<<<<<<<<<<<<<
* info.len = self.len
* info.ndim = self.ndim
*/
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":194
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
* info.len = self.len # <<<<<<<<<<<<<<
* info.ndim = self.ndim
* info.shape = self._shape
*/
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":195
* info.buf = self.data
* info.len = self.len
* info.ndim = self.ndim # <<<<<<<<<<<<<<
* info.shape = self._shape
* info.strides = self._strides
*/
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":196
* info.len = self.len
* info.ndim = self.ndim
* info.shape = self._shape # <<<<<<<<<<<<<<
* info.strides = self._strides
* info.suboffsets = NULL
*/
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":197
* info.ndim = self.ndim
* info.shape = self._shape
* info.strides = self._strides # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = self.itemsize
*/
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":198
* info.shape = self._shape
* info.strides = self._strides
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = self.itemsize
* info.readonly = 0
*/
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":199
* info.strides = self._strides
* info.suboffsets = NULL
* info.itemsize = self.itemsize # <<<<<<<<<<<<<<
* info.readonly = 0
*
*/
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":200
* info.suboffsets = NULL
* info.itemsize = self.itemsize
* info.readonly = 0 # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
__pyx_v_info->readonly = 0;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":203
*
* if flags & PyBUF_FORMAT:
* info.format = self.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":205
* info.format = self.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.obj = self
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* "View.MemoryView":207
* info.format = NULL
*
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
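/* Note: this is the PEP 3118 buffer exporter for cython.array. The requested
 * flags must include the contiguity bit matching self.mode; the filled
 * Py_buffer borrows shape/strides from the arrays allocated in __cinit__,
 * and info->obj holds a reference that keeps the exporter alive. A sketch of
 * a C consumer (illustrative only, not part of the generated code):
 *
 *     Py_buffer view;
 *     if (PyObject_GetBuffer(obj, &view,
 *                            PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) == 0) {
 *         // use view.buf, view.shape, view.strides, view.itemsize ...
 *         PyBuffer_Release(&view);
 *     }
 */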
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* Python wrapper */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
__pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":213
* def __dealloc__(array self):
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data) # <<<<<<<<<<<<<<
* elif self.free_data:
* if self.dtype_is_object:
*/
__pyx_v_self->callback_free_data(__pyx_v_self->data);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
goto __pyx_L3;
}
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
__pyx_t_1 = (__pyx_v_self->free_data != 0);
if (__pyx_t_1) {
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":216
* elif self.free_data:
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<<
* self._strides, self.ndim, False)
* free(self.data)
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
}
/* "View.MemoryView":218
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
* free(self.data) # <<<<<<<<<<<<<<
* PyObject_Free(self._shape)
*
*/
free(__pyx_v_self->data);
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
}
__pyx_L3:;
/* "View.MemoryView":219
* self._strides, self.ndim, False)
* free(self.data)
* PyObject_Free(self._shape) # <<<<<<<<<<<<<<
*
* @property
*/
PyObject_Free(__pyx_v_self->_shape);
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
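/* Note: teardown order in __dealloc__: a user-supplied callback_free_data
 * wins; otherwise, when free_data is set, object elements are decref'd via
 * refcount_objects_in_slice before free(self.data). PyObject_Free(self._shape)
 * always runs, releasing the combined shape+strides block from __cinit__. */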
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":223
* @property
* def memview(self):
* return self.get_memview() # <<<<<<<<<<<<<<
*
* @cname('get_memview')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_memview", 0);
/* "View.MemoryView":227
* @cname('get_memview')
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
* return memoryview(self, flags, self.dtype_is_object)
*
*/
__pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
/* "View.MemoryView":228
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
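/* Note: get_memview wraps the array in a memoryview requested with
 * PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE, forwarding
 * dtype_is_object so object elements keep correct refcounting. The memview
 * property above is just a Python-visible accessor for this call. */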
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* Python wrapper */
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":231
*
* def __len__(self):
* return self._shape[0] # <<<<<<<<<<<<<<
*
* def __getattr__(self, attr):
*/
__pyx_r = (__pyx_v_self->_shape[0]);
goto __pyx_L0;
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getattr__", 0);
/* "View.MemoryView":234
*
* def __getattr__(self, attr):
* return getattr(self.memview, attr) # <<<<<<<<<<<<<<
*
* def __getitem__(self, item):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":237
*
* def __getitem__(self, item):
* return self.memview[item] # <<<<<<<<<<<<<<
*
* def __setitem__(self, item, value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* "View.MemoryView":240
*
* def __setitem__(self, item, value):
* self.memview[item] = value # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
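/* Note: __getattr__, __getitem__ and __setitem__ all delegate to
 * self.memview, so indexing and attribute access on a cython.array behave
 * exactly like the equivalent operations on a memoryview of it. */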
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
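/* Note: pickling of cython.array is deliberately disabled; both
 * __reduce_cython__ and __setstate_cython__ raise TypeError, since the
 * non-trivial __cinit__ (C-level buffer allocation) cannot be replayed by
 * the default reduce machinery. */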
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
struct __pyx_array_obj *__pyx_v_result = 0;
struct __pyx_array_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("array_cwrapper", 0);
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
__pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":249
*
* if buf == NULL:
* result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<<
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
/*else*/ {
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":252
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False) # <<<<<<<<<<<<<<
* result.data = buf
*
*/
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error)
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":253
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False)
* result.data = buf # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->data = __pyx_v_buf;
}
__pyx_L3:;
/* "View.MemoryView":255
* result.data = buf
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
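/* Note: array_cwrapper (exposed via @cname as __pyx_array_new) is the
 * C-level constructor. With buf == NULL it lets __cinit__ allocate the data;
 * otherwise it passes allocate_buffer=False and points result.data at the
 * caller's buffer, which the caller continues to own. Illustrative call with
 * hypothetical argument values (not taken from this file):
 *
 *     result = __pyx_array_new(shape_tuple, sizeof(double), "d", "c", NULL);
 */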
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* Python wrapper */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_name = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
PyObject* values[1] = {0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
}
__pyx_v_name = values[0];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__", 0);
/* "View.MemoryView":282
* cdef object name
* def __init__(self, name):
* self.name = name # <<<<<<<<<<<<<<
* def __repr__(self):
* return self.name
*/
__Pyx_INCREF(__pyx_v_name);
__Pyx_GIVEREF(__pyx_v_name);
__Pyx_GOTREF(__pyx_v_self->name);
__Pyx_DECREF(__pyx_v_self->name);
__pyx_v_self->name = __pyx_v_name;
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* Python wrapper */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":284
* self.name = name
* def __repr__(self):
* return self.name # <<<<<<<<<<<<<<
*
* cdef generic = Enum("<strided and direct or indirect>")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->name);
__pyx_r = __pyx_v_self->name;
goto __pyx_L0;
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
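/* Note: Enum here is not a Python enum; it is a tiny named-sentinel class
 * whose repr is simply its construction string, used for the module-level
 * access-kind constants such as generic = Enum("<strided and direct or
 * indirect>") referenced in the source echo above. */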
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_v_state = 0;
PyObject *__pyx_v__dict = 0;
int __pyx_v_use_setstate;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":5
* cdef object _dict
* cdef bint use_setstate
* state = (self.name,) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_self->name);
__Pyx_GIVEREF(__pyx_v_self->name);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name);
__pyx_v_state = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "(tree fragment)":6
* cdef bint use_setstate
* state = (self.name,)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v__dict = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_2 = (__pyx_v__dict != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "(tree fragment)":8
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict);
__pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
__pyx_t_4 = 0;
/* "(tree fragment)":9
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.name is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":11
* use_setstate = True
* else:
* use_setstate = self.name is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_self->name != Py_None);
__pyx_v_use_setstate = __pyx_t_3;
}
__pyx_L3:;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
__pyx_t_3 = (__pyx_v_use_setstate != 0);
if (__pyx_t_3) {
/* "(tree fragment)":13
* use_setstate = self.name is not None
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None);
__pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state);
__pyx_t_4 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
}
/* "(tree fragment)":15
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
__pyx_t_5 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
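/* Note: this follows the standard Cython pickle scheme: it returns
 * (__pyx_unpickle_Enum, (type(self), 0xb068931, ...)) where 0xb068931
 * (== 184977713, i.e. __pyx_int_184977713 above) is a layout checksum used
 * to detect binary-incompatible class definitions at unpickle time. The
 * state tuple carries (self.name,) plus the instance __dict__ if present,
 * and use_setstate selects between the two-tuple and three-tuple forms. */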
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":17
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
__pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
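/* Rounds `memory` up to the next multiple of `alignment`: when the
 * address is not already aligned, `alignment - offset` bytes are added.
 * Worked example (assumed values, for illustration only): memory =
 * 0x1003 and alignment = 8 give offset = 3, so the result is
 * 0x1003 + (8 - 3) = 0x1008. */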
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
Py_intptr_t __pyx_v_aligned_p;
size_t __pyx_v_offset;
void *__pyx_r;
int __pyx_t_1;
/* "View.MemoryView":300
* cdef void *align_pointer(void *memory, size_t alignment) nogil:
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<<
* cdef size_t offset
*
*/
__pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory);
/* "View.MemoryView":304
*
* with cython.cdivision(True):
* offset = aligned_p % alignment # <<<<<<<<<<<<<<
*
* if offset > 0:
*/
__pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment);
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
__pyx_t_1 = ((__pyx_v_offset > 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":307
*
* if offset > 0:
* aligned_p += alignment - offset # <<<<<<<<<<<<<<
*
* return <void *> aligned_p
*/
__pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset));
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
}
/* "View.MemoryView":309
* aligned_p += alignment - offset
*
* return <void *> aligned_p # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = ((void *)__pyx_v_aligned_p);
goto __pyx_L0;
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
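/* Wrapper for memoryview.__cinit__: unpacks (obj, flags, dtype_is_object)
 * from the positional tuple and/or keyword dict. Two to three arguments
 * are accepted; a wrong count jumps to __pyx_L5_argtuple_error, which
 * raises TypeError via __Pyx_RaiseArgtupleInvalid. dtype_is_object
 * defaults to 0 when omitted. */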
/* Python wrapper */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_obj = 0;
int __pyx_v_flags;
int __pyx_v_dtype_is_object;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object);
if (value) { values[2] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_obj = values[0];
__pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
if (values[2]) {
__pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
} else {
__pyx_v_dtype_is_object = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
/* "View.MemoryView":346
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj # <<<<<<<<<<<<<<
* self.flags = flags
* if type(self) is memoryview or obj is not None:
*/
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
__Pyx_GOTREF(__pyx_v_self->obj);
__Pyx_DECREF(__pyx_v_self->obj);
__pyx_v_self->obj = __pyx_v_obj;
/* "View.MemoryView":347
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj
* self.flags = flags # <<<<<<<<<<<<<<
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
*/
__pyx_v_self->flags = __pyx_v_flags;
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
__pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
__pyx_t_3 = (__pyx_t_2 != 0);
if (!__pyx_t_3) {
} else {
__pyx_t_1 = __pyx_t_3;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_3 = (__pyx_v_obj != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":349
* self.flags = flags
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<<
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
*/
__pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error)
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
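      /* Some buffer exporters leave view.obj NULL. Installing Py_None
       * (with a matching incref) gives the view a well-defined owner;
       * __dealloc__ recognizes this sentinel and drops the Py_None
       * reference instead of calling __Pyx_ReleaseBuffer. */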
__pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":351
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
/* "View.MemoryView":352
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* global __pyx_memoryview_thread_locks_used
*/
Py_INCREF(Py_None);
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
}
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
}
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
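  /* Each memoryview carries a PyThread lock (used to guard the view's
   * acquisition count; see acquisition_count_aligned_p below). Locks are
   * handed out from a small static pool first (THREAD_LOCKS_PREALLOCATED,
   * 8 entries in this build), so most memoryviews never call
   * PyThread_allocate_lock(); only when the pool is exhausted is a fresh
   * lock allocated, and MemoryError is raised if that allocation fails. */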
__pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":356
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
*/
__pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
/* "View.MemoryView":357
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<<
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":359
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<<
* if self.lock is NULL:
* raise MemoryError
*/
__pyx_v_self->lock = PyThread_allocate_lock();
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":361
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
* raise MemoryError # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error)
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
}
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
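  /* With PyBUF_FORMAT available, dtype_is_object is derived from the
   * exporter's format string: exactly "O" means the elements are PyObject
   * pointers. Without format information, the caller-supplied flag is
   * trusted instead (else-branch below). */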
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":364
*
* if flags & PyBUF_FORMAT:
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<<
* else:
* self.dtype_is_object = dtype_is_object
*/
__pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_self->dtype_is_object = __pyx_t_1;
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
goto __pyx_L10;
}
/* "View.MemoryView":366
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
* self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<<
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
*/
/*else*/ {
__pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
}
__pyx_L10:;
/* "View.MemoryView":368
* self.dtype_is_object = dtype_is_object
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<<
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL
*/
__pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
/* "View.MemoryView":370
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL # <<<<<<<<<<<<<<
*
* def __dealloc__(memoryview self):
*/
__pyx_v_self->typeinfo = NULL;
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
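/* __dealloc__ undoes __cinit__: it releases the Py_buffer when one was
 * acquired (self.obj is not None), or drops the Py_None sentinel owner
 * when the exporter had left view.obj NULL. It then returns the
 * per-view lock to the static pool, or frees it with
 * PyThread_free_lock() if it did not come from the pool. */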
/* Python wrapper */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyThread_type_lock __pyx_t_6;
PyThread_type_lock __pyx_t_7;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
__pyx_t_1 = (__pyx_v_self->obj != Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":374
* def __dealloc__(memoryview self):
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<<
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
*/
__Pyx_ReleaseBuffer((&__pyx_v_self->view));
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
goto __pyx_L3;
}
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
__pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":377
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
* (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<<
* Py_DECREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = NULL;
/* "View.MemoryView":378
*
* (<__pyx_buffer *> &self.view).obj = NULL
* Py_DECREF(Py_None) # <<<<<<<<<<<<<<
*
* cdef int i
*/
Py_DECREF(Py_None);
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
}
__pyx_L3:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
__pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":383
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<<
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
*/
__pyx_t_3 = __pyx_memoryview_thread_locks_used;
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
__pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":385
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<<
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
__pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":388
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<<
* break
* else:
*/
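        /* Return the lock to the pool by swapping slot i with the last
         * in-use slot, so the first __pyx_memoryview_thread_locks_used
         * entries always form the compact set of live locks. */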
__pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
__pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
/* "View.MemoryView":387
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break
*/
(__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6;
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7;
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
}
/* "View.MemoryView":389
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break # <<<<<<<<<<<<<<
* else:
* PyThread_free_lock(self.lock)
*/
goto __pyx_L6_break;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
}
}
/*else*/ {
/* "View.MemoryView":391
* break
* else:
* PyThread_free_lock(self.lock) # <<<<<<<<<<<<<<
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
*/
PyThread_free_lock(__pyx_v_self->lock);
}
__pyx_L6_break:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
}
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
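/* get_item_pointer resolves a full scalar index to an element address:
 * starting from view.buf, each index in the sequence is applied by
 * __pyx_pybuffer_index, which offsets (and bounds-checks) one dimension
 * at a time. The loop below is Cython's expansion of
 * `for dim, idx in enumerate(index)`, with fast paths for list/tuple
 * indices and a generic-iterator fallback. */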
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
Py_ssize_t __pyx_v_dim;
char *__pyx_v_itemp;
PyObject *__pyx_v_idx = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t __pyx_t_3;
PyObject *(*__pyx_t_4)(PyObject *);
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
char *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_item_pointer", 0);
/* "View.MemoryView":395
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<<
*
* for dim, idx in enumerate(index):
*/
__pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
__pyx_t_1 = 0;
if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
__pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
__pyx_t_4 = NULL;
} else {
__pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_4)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
} else {
if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
}
} else {
__pyx_t_5 = __pyx_t_4(__pyx_t_2);
if (unlikely(!__pyx_t_5)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 397, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_5);
}
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_1;
__pyx_t_1 = (__pyx_t_1 + 1);
/* "View.MemoryView":398
*
* for dim, idx in enumerate(index):
* itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<<
*
* return itemp
*/
__pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error)
__pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_7;
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":400
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
* return itemp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_itemp;
goto __pyx_L0;
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":403
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
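/* __getitem__: indexing with Ellipsis returns the memoryview itself.
 * Any other index is normalized by _unellipsify into
 * (have_slices, indices); slice indices produce a new memoryview via
 * memview_slice, while a purely scalar index resolves to a single
 * element that convert_item_to_object turns back into a Python value. */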
/* Python wrapper */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_indices = NULL;
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":404
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":405
* def __getitem__(memoryview self, object index):
* if index is Ellipsis:
* return self # <<<<<<<<<<<<<<
*
* have_slices, indices = _unellipsify(index, self.view.ndim)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__pyx_r = ((PyObject *)__pyx_v_self);
goto __pyx_L0;
/* "View.MemoryView":404
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
}
/* "View.MemoryView":407
* return self
*
* have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* cdef char *itemp
*/
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (likely(__pyx_t_3 != Py_None)) {
PyObject* sequence = __pyx_t_3;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 407, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_v_indices = __pyx_t_5;
__pyx_t_5 = 0;
/* "View.MemoryView":410
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error)
if (__pyx_t_2) {
/* "View.MemoryView":411
* cdef char *itemp
* if have_slices:
* return memview_slice(self, indices) # <<<<<<<<<<<<<<
* else:
* itemp = self.get_item_pointer(indices)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":410
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
}
/* "View.MemoryView":413
* return memview_slice(self, indices)
* else:
* itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<<
* return self.convert_item_to_object(itemp)
*
*/
/*else*/ {
__pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":414
* else:
* itemp = self.get_item_pointer(indices)
* return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<<
*
* def __setitem__(memoryview self, object index, object value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":403
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_indices);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":416
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
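/* __setitem__ rejects read-only buffers up front, then normalizes the
 * index. For slice targets, is_slice() decides the path: values that
 * expose a buffer are copied element-wise via setitem_slice_assignment,
 * anything else is broadcast as a scalar via setitem_slice_assign_scalar.
 * Plain integer indices go through setitem_indexed. */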
/* Python wrapper */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_obj = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
__Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":417
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
__pyx_t_1 = (__pyx_v_self->view.readonly != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":418
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 418, __pyx_L1_error)
/* "View.MemoryView":417
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
}
/* "View.MemoryView":420
* raise TypeError("Cannot assign to read-only memoryview")
*
* have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* if have_slices:
*/
__pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (likely(__pyx_t_2 != Py_None)) {
PyObject* sequence = __pyx_t_2;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 420, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_3;
__pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":422
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":423
*
* if have_slices:
* obj = self.is_slice(value) # <<<<<<<<<<<<<<
* if obj:
* self.setitem_slice_assignment(self[index], obj)
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_v_obj = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":424
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":425
* obj = self.is_slice(value)
* if obj:
* self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
* else:
* self.setitem_slice_assign_scalar(self[index], value)
*/
__pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "View.MemoryView":424
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":427
* self.setitem_slice_assignment(self[index], obj)
* else:
* self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
* else:
* self.setitem_indexed(index, value)
*/
/*else*/ {
__pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error)
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L5:;
/* "View.MemoryView":422
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
goto __pyx_L4;
}
/* "View.MemoryView":429
* self.setitem_slice_assign_scalar(self[index], value)
* else:
* self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
*
* cdef is_slice(self, obj):
*/
/*else*/ {
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L4:;
/* "View.MemoryView":416
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":431
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
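/* is_slice coerces `obj` into a memoryview so slice assignment can read
 * from it uniformly. Non-memoryview objects are wrapped with writability
 * stripped from the flags (self.flags & ~PyBUF_WRITABLE, plus
 * PyBUF_ANY_CONTIGUOUS); objects that do not support the buffer protocol
 * raise TypeError, which is caught and mapped to a None return so the
 * caller can fall back to scalar assignment. */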
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_slice", 0);
__Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":432
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "View.MemoryView":434
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":435
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object) # <<<<<<<<<<<<<<
* except TypeError:
* return None
*/
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":434
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
__pyx_t_7 = 0;
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L9_try_end;
__pyx_L4_error:;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
/* "View.MemoryView":436
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
* except TypeError: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":437
* self.dtype_is_object)
* except TypeError:
* return None # <<<<<<<<<<<<<<
*
* return obj
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L7_except_return;
}
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L7_except_return:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L0;
__pyx_L9_try_end:;
}
/* "View.MemoryView":432
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
}
/* "View.MemoryView":439
* return None
*
* return obj # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assignment(self, dst, src):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_obj);
__pyx_r = __pyx_v_obj;
goto __pyx_L0;
/* "View.MemoryView":431
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":441
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
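/* setitem_slice_assignment extracts the C-level __Pyx_memviewslice from
 * both src and dst and hands them to memoryview_copy_contents together
 * with each side's ndim and the dtype_is_object flag, which governs
 * per-element reference counting during the copy. */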
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
__Pyx_memviewslice __pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_src_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
__Pyx_memviewslice *__pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
/* "View.MemoryView":445
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error)
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error)
/* "View.MemoryView":446
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<<
* src.ndim, dst.ndim, self.dtype_is_object)
*
*/
if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error)
__pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error)
/* "View.MemoryView":447
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":445
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
__pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error)
/* "View.MemoryView":441
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":449
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
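/* setitem_slice_assign_scalar broadcasts one Python value over a whole
 * destination slice. The value is first packed into temporary item
 * storage: the 128-int stack array (512 bytes with 4-byte ints) when
 * the itemsize fits, otherwise a PyMem_Malloc'd buffer. Object dtypes
 * store the PyObject pointer directly; all other dtypes convert through
 * assign_item_from_object. The try/finally expansion below guarantees
 * PyMem_Free(tmp) runs on every exit path (a no-op when tmp is NULL). */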
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
int __pyx_v_array[0x80];
void *__pyx_v_tmp;
void *__pyx_v_item;
__Pyx_memviewslice *__pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_tmp_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
char const *__pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
PyObject *__pyx_t_12 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
/* "View.MemoryView":451
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
* cdef int array[128]
* cdef void *tmp = NULL # <<<<<<<<<<<<<<
* cdef void *item
*
*/
__pyx_v_tmp = NULL;
/* "View.MemoryView":456
* cdef __Pyx_memviewslice *dst_slice
* cdef __Pyx_memviewslice tmp_slice
* dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
*
* if <size_t>self.view.itemsize > sizeof(array):
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error)
__pyx_v_dst_slice = __pyx_t_1;
/* "View.MemoryView":458
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
__pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":459
*
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<<
* if tmp == NULL:
* raise MemoryError
*/
__pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
/* "View.MemoryView":460
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
__pyx_t_2 = ((__pyx_v_tmp == NULL) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":461
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
* raise MemoryError # <<<<<<<<<<<<<<
* item = tmp
* else:
*/
PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error)
/* "View.MemoryView":460
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
}
/* "View.MemoryView":462
* if tmp == NULL:
* raise MemoryError
* item = tmp # <<<<<<<<<<<<<<
* else:
* item = <void *> array
*/
__pyx_v_item = __pyx_v_tmp;
/* "View.MemoryView":458
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
goto __pyx_L3;
}
/* "View.MemoryView":464
* item = tmp
* else:
* item = <void *> array # <<<<<<<<<<<<<<
*
* try:
*/
/*else*/ {
__pyx_v_item = ((void *)__pyx_v_array);
}
__pyx_L3:;
/* "View.MemoryView":466
* item = <void *> array
*
* try: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value
*/
/*try:*/ {
/* "View.MemoryView":467
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
__pyx_t_2 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_2) {
/* "View.MemoryView":468
* try:
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<<
* else:
* self.assign_item_from_object(<char *> item, value)
*/
(((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
/* "View.MemoryView":467
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":470
* (<PyObject **> item)[0] = <PyObject *> value
* else:
* self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L8:;
/* "View.MemoryView":474
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
__pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":475
*
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
* item, self.dtype_is_object)
*/
__pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":474
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
}
/* "View.MemoryView":476
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
* item, self.dtype_is_object)
* finally:
*/
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
}
/* "View.MemoryView":479
* item, self.dtype_is_object)
* finally:
* PyMem_Free(tmp) # <<<<<<<<<<<<<<
*
* cdef setitem_indexed(self, index, value):
*/
/*finally:*/ {
/*normal exit:*/{
PyMem_Free(__pyx_v_tmp);
goto __pyx_L7;
}
__pyx_L6_error:;
/*exception exit:*/{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_7);
__Pyx_XGOTREF(__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_10);
__Pyx_XGOTREF(__pyx_t_11);
__Pyx_XGOTREF(__pyx_t_12);
__pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename;
{
PyMem_Free(__pyx_v_tmp);
}
if (PY_MAJOR_VERSION >= 3) {
__Pyx_XGIVEREF(__pyx_t_10);
__Pyx_XGIVEREF(__pyx_t_11);
__Pyx_XGIVEREF(__pyx_t_12);
__Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12);
}
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_XGIVEREF(__pyx_t_9);
__Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9);
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6;
goto __pyx_L1_error;
}
__pyx_L7:;
}
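/* The finally clause above is expanded twice: the normal path frees `tmp` and
 * jumps past the handler (__pyx_L7), while the exception path first captures
 * the in-flight exception state (__Pyx_ErrFetch/__Pyx_GetException, plus
 * __Pyx_ExceptionSwap on Python 3), runs PyMem_Free(tmp), then restores the
 * exception and resumes at __pyx_L1_error, so the cleanup can never clobber
 * the exception being propagated. */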
/* "View.MemoryView":449
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
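/* setitem_indexed: the plain element-assignment path. It resolves `index` to
 * a raw item pointer via get_item_pointer and delegates the value conversion
 * to assign_item_from_object; no slicing or scalar broadcasting is involved
 * here. */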
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
char *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_indexed", 0);
/* "View.MemoryView":482
*
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
* self.assign_item_from_object(itemp, value)
*
*/
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_1;
/* "View.MemoryView":483
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
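/* convert_item_to_object: generic, struct-module based read path. The item's
 * raw bytes (itemp[:itemsize]) are unpacked with struct.unpack using the
 * buffer's format string; a one-character format returns the lone scalar
 * rather than a 1-tuple, and struct.error is turned into ValueError.
 *
 * Illustrative sketch only (assuming Cython's view.array accepts this struct
 * format, on a typical platform where struct.calcsize("qd") == 16):
 *
 *     from cython.view cimport array
 *     a = array(shape=(4,), itemsize=16, format="qd", mode="c")
 *     a[0] = (1, 2.0)   # assign_item_from_object -> struct.pack
 *     x = a[0]          # convert_item_to_object  -> struct.unpack -> (1, 2.0)
 */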
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
size_t __pyx_t_10;
int __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":488
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef bytes bytesitem
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":491
* cdef bytes bytesitem
*
* bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
* try:
* result = struct.unpack(self.view.format, bytesitem)
*/
__pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp, __pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":493
* bytesitem = itemp[:self.view.itemsize]
* try:
* result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
* except struct.error:
* raise ValueError("Unable to convert item to object")
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6);
__Pyx_INCREF(__pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
}
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
/*else:*/ {
__pyx_t_10 = strlen(__pyx_v_self->view.format);
__pyx_t_11 = ((__pyx_t_10 == 1) != 0);
if (__pyx_t_11) {
/* "View.MemoryView":498
* else:
* if len(self.view.format) == 1:
* return result[0] # <<<<<<<<<<<<<<
* return result
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6_except_return;
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
}
/* "View.MemoryView":499
* if len(self.view.format) == 1:
* return result[0]
* return result # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
__pyx_L3_error:;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "View.MemoryView":494
* try:
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error: # <<<<<<<<<<<<<<
* raise ValueError("Unable to convert item to object")
* else:
*/
__Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9);
__pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0;
if (__pyx_t_8) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_1);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(1, 495, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
}
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
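/* assign_item_from_object: generic, struct-module based write path, the
 * mirror of convert_item_to_object above. A tuple value is splatted into
 * struct.pack(self.view.format, *value) so multi-field formats work; any
 * other value is packed as a single field. The packed bytes are then copied
 * into the item one char at a time by the enumerate loop below. */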
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
char *__pyx_t_11;
char *__pyx_t_12;
char *__pyx_t_13;
char *__pyx_t_14;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":504
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef char c
* cdef bytes bytesvalue
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":510
*
* if isinstance(value, tuple):
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
* else:
* bytesvalue = struct.pack(self.view.format, value)
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":512
* bytesvalue = struct.pack(self.view.format, *value)
* else:
* bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
*
* for i, c in enumerate(bytesvalue):
*/
/*else*/ {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
__pyx_t_7 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_7 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_5) {
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
}
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
__Pyx_INCREF(__pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
}
__pyx_L3:;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
__PYX_ERR(1, 514, __pyx_L1_error)
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_10 = __pyx_v_bytesvalue;
__pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10);
__pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10));
for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
__pyx_t_11 = __pyx_t_14;
__pyx_v_c = (__pyx_t_11[0]);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
__pyx_v_i = __pyx_t_9;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = (__pyx_t_9 + 1);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
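/* __getbuffer__: re-exports this memoryview through the buffer protocol. A
 * PyBUF_WRITABLE request against a read-only view raises ValueError; the
 * shape, strides, suboffsets and format pointers are filled in only when the
 * matching PyBUF_ND / PyBUF_STRIDES / PyBUF_INDIRECT / PyBUF_FORMAT flag bits
 * are set, and info->obj is set to self so the consumer keeps this memoryview
 * alive for the lifetime of the export. */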
/* Python wrapper */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
char *__pyx_t_5;
void *__pyx_t_6;
int __pyx_t_7;
Py_ssize_t __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
__pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_2 = (__pyx_v_self->view.readonly != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 520, __pyx_L1_error)
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
}
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":523
*
* if flags & PyBUF_ND:
* info.shape = self.view.shape # <<<<<<<<<<<<<<
* else:
* info.shape = NULL
*/
__pyx_t_4 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_4;
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":525
* info.shape = self.view.shape
* else:
* info.shape = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_STRIDES:
*/
/*else*/ {
__pyx_v_info->shape = NULL;
}
__pyx_L6:;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":528
*
* if flags & PyBUF_STRIDES:
* info.strides = self.view.strides # <<<<<<<<<<<<<<
* else:
* info.strides = NULL
*/
__pyx_t_4 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_4;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
goto __pyx_L7;
}
/* "View.MemoryView":530
* info.strides = self.view.strides
* else:
* info.strides = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_INDIRECT:
*/
/*else*/ {
__pyx_v_info->strides = NULL;
}
__pyx_L7:;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":533
*
* if flags & PyBUF_INDIRECT:
* info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
* else:
* info.suboffsets = NULL
*/
__pyx_t_4 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_4;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":535
* info.suboffsets = self.view.suboffsets
* else:
* info.suboffsets = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
/*else*/ {
__pyx_v_info->suboffsets = NULL;
}
__pyx_L8:;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":538
*
* if flags & PyBUF_FORMAT:
* info.format = self.view.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_5 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_5;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":540
* info.format = self.view.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.buf = self.view.buf
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L9:;
/* "View.MemoryView":542
* info.format = NULL
*
* info.buf = self.view.buf # <<<<<<<<<<<<<<
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
*/
__pyx_t_6 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_6;
/* "View.MemoryView":543
*
* info.buf = self.view.buf
* info.ndim = self.view.ndim # <<<<<<<<<<<<<<
* info.itemsize = self.view.itemsize
* info.len = self.view.len
*/
__pyx_t_7 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_7;
/* "View.MemoryView":544
* info.buf = self.view.buf
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
* info.len = self.view.len
* info.readonly = self.view.readonly
*/
__pyx_t_8 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_8;
/* "View.MemoryView":545
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
* info.len = self.view.len # <<<<<<<<<<<<<<
* info.readonly = self.view.readonly
* info.obj = self
*/
__pyx_t_8 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_8;
/* "View.MemoryView":546
* info.itemsize = self.view.itemsize
* info.len = self.view.len
* info.readonly = self.view.readonly # <<<<<<<<<<<<<<
* info.obj = self
*
*/
__pyx_t_1 = __pyx_v_self->view.readonly;
__pyx_v_info->readonly = __pyx_t_1;
/* "View.MemoryView":547
* info.len = self.view.len
* info.readonly = self.view.readonly
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
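/* T: the transposed view. memoryview_copy produces a new memoryview object
 * over the same underlying buffer (no element copy), and transpose_memslice
 * reverses its shape/strides in place; transpose_memslice signals failure by
 * returning 0, which is what the `== 0` check below tests. */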
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":554
* @property
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
* transpose_memslice(&result.from_slice)
* return result
*/
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error)
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":555
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error)
/* "View.MemoryView":556
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
* return result # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
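/* base: the object whose buffer this memoryview wraps, i.e. the `obj` the
 * view was constructed from (comparable to memoryview.obj on the builtin
 * type). */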
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":560
* @property
* def base(self):
* return self.obj # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj);
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
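/* shape: materializes view.shape[:ndim] as a tuple of Python ints. Together
 * with the strides and suboffsets properties below, a C-contiguous
 * double[:, ::1] view of shape (3, 4) would be expected to report (assuming
 * 8-byte doubles):
 *
 *     mv.shape      == (3, 4)
 *     mv.strides    == (32, 8)
 *     mv.suboffsets == (-1, -1)   # no indirect dimensions
 */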
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_length;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":564
* @property
* def shape(self):
* return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_length = (__pyx_t_2[0]);
__pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":567
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
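/* strides: raises ValueError when the producer supplied no stride
 * information (view.strides == NULL, e.g. a buffer acquired with PyBUF_ND
 * only); otherwise returns view.strides[:ndim] as a tuple. */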
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_stride;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":568
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":570
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 570, __pyx_L1_error)
/* "View.MemoryView":568
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
}
/* "View.MemoryView":572
* raise ValueError("Buffer view does not expose strides")
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_v_stride = (__pyx_t_3[0]);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "View.MemoryView":567
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":575
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
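/* suboffsets: a fully direct buffer stores no suboffset array, so this
 * synthesizes (-1,) * ndim (PEP 3118 uses -1 to mean "no indirection");
 * otherwise view.suboffsets[:ndim] is returned as a tuple. */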
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
Py_ssize_t *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":576
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
__pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":577
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_tuple__14, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":576
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
}
/* "View.MemoryView":579
* return (-1,) * self.view.ndim
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
__pyx_t_4 = __pyx_t_6;
__pyx_v_suboffset = (__pyx_t_4[0]);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":575
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":582
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":583
* @property
* def ndim(self):
* return self.view.ndim # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":582
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":586
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":587
* @property
* def itemsize(self):
* return self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":586
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":590
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
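/* nbytes: computed as self.size * view.itemsize through Python-level
 * attribute access and PyNumber_Multiply, so it reuses (and, on first use,
 * populates) the cached `size` property below. */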
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":591
* @property
* def nbytes(self):
* return self.size * self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":590
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":594
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
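/* size: the total element count, computed lazily as the product of all shape
 * entries and cached in self._size; later reads return the cached Python int
 * (e.g. shape (3, 4) gives size 12). */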
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":595
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
__pyx_t_1 = (__pyx_v_self->_size == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":596
* def size(self):
* if self._size is None:
* result = 1 # <<<<<<<<<<<<<<
*
* for length in self.view.shape[:self.view.ndim]:
*/
__Pyx_INCREF(__pyx_int_1);
__pyx_v_result = __pyx_int_1;
/* "View.MemoryView":598
* result = 1
*
* for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<<
* result *= length
*
*/
__pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
__pyx_t_6 = 0;
/* "View.MemoryView":599
*
* for length in self.view.shape[:self.view.ndim]:
* result *= length # <<<<<<<<<<<<<<
*
* self._size = result
*/
__pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
__pyx_t_6 = 0;
}
/* "View.MemoryView":601
* result *= length
*
* self._size = result # <<<<<<<<<<<<<<
*
* return self._size
*/
__Pyx_INCREF(__pyx_v_result);
__Pyx_GIVEREF(__pyx_v_result);
__Pyx_GOTREF(__pyx_v_self->_size);
__Pyx_DECREF(__pyx_v_self->_size);
__pyx_v_self->_size = __pyx_v_result;
/* "View.MemoryView":595
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
}
/* "View.MemoryView":603
* self._size = result
*
* return self._size # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->_size);
__pyx_r = __pyx_v_self->_size;
goto __pyx_L0;
/* "View.MemoryView":594
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":605
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
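/* __len__: the length of the first dimension (shape[0]) when ndim >= 1, and
 * 0 for a zero-dimensional view. */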
/* Python wrapper */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":606
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
__pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":607
* def __len__(self):
* if self.view.ndim >= 1:
* return self.view.shape[0] # <<<<<<<<<<<<<<
*
* return 0
*/
__pyx_r = (__pyx_v_self->view.shape[0]);
goto __pyx_L0;
/* "View.MemoryView":606
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
}
/* "View.MemoryView":609
* return self.view.shape[0]
*
* return 0 # <<<<<<<<<<<<<<
*
* def __repr__(self):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":605
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":611
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
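/* __repr__: formats "<MemoryView of %r at 0x%x>" from the base object's
 * class name and id(self); __str__ below reports the class name only,
 * without the address. */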
/* Python wrapper */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":612
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":613
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self)) # <<<<<<<<<<<<<<
*
* def __str__(self):
*/
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* "View.MemoryView":612
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":611
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":615
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__str__", 0);
/* "View.MemoryView":616
*
* def __str__(self):
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":615
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
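/* __str__ is the address-free variant of __repr__: the single-element
 * tuple built above feeds "<MemoryView of %r object>", yielding e.g.
 * "<MemoryView of 'ndarray' object>" (class name illustrative). */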
/* "View.MemoryView":619
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_c_contig", 0);
/* "View.MemoryView":622
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":623
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<<
*
* def is_f_contig(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":619
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":625
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_f_contig", 0);
/* "View.MemoryView":628
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":629
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<<
*
* def copy(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":625
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
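/* Both predicates above defer to __pyx_memviewslice_is_contig with 'C'
 * or 'F' as the order. A sketch of the semantics (the helper is defined
 * elsewhere in this module): C contiguity means the innermost stride
 * equals the itemsize and each outer stride is the product of the inner
 * extents times the itemsize -- e.g. a (3, 4) view of 8-byte items is
 * C-contiguous with strides (32, 8) -- while 'F' checks the
 * mirror-image, column-major layout. */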
/* "View.MemoryView":631
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_mslice;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy", 0);
/* "View.MemoryView":633
* def copy(self):
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &mslice)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
/* "View.MemoryView":635
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*
* slice_copy(self, &mslice) # <<<<<<<<<<<<<<
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
/* "View.MemoryView":636
*
* slice_copy(self, &mslice)
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_C_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":641
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
*
* def copy_fortran(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":631
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":643
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy_fortran", 0);
/* "View.MemoryView":645
* def copy_fortran(self):
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &src)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
/* "View.MemoryView":647
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*
* slice_copy(self, &src) # <<<<<<<<<<<<<<
* dst = slice_copy_contig(&src, "fortran", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
/* "View.MemoryView":648
*
* slice_copy(self, &src)
* dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_F_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error)
__pyx_v_dst = __pyx_t_1;
/* "View.MemoryView":653
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":643
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
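/* copy() and copy_fortran() are mirror images: copy() clears
 * PyBUF_F_CONTIGUOUS from the flags and requests a "c"-ordered buffer,
 * while copy_fortran() clears PyBUF_C_CONTIGUOUS and requests "fortran"
 * ordering. In both cases __pyx_memoryview_copy_new_contig produces a
 * contiguous copy of the data, which memoryview_copy_from_slice then
 * wraps in a fresh memoryview object. */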
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
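/* Pickling support is deliberately absent: both __reduce_cython__ and
 * __setstate_cython__ raise TypeError unconditionally, since a
 * memoryview wraps a buffer acquired in its non-trivial __cinit__ and
 * cannot be reconstructed from serialized state alone. */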
/* "View.MemoryView":657
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
struct __pyx_memoryview_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
/* "View.MemoryView":658
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
* result.typeinfo = typeinfo
* return result
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_o);
__Pyx_GIVEREF(__pyx_v_o);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":659
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_v_result->typeinfo = __pyx_v_typeinfo;
/* "View.MemoryView":660
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_check')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":657
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
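/* memoryview_cwrapper is the C-level constructor the rest of the
 * generated module uses: it boxes the int flags and the bint
 * dtype_is_object into Python objects, calls the memoryview type with
 * (o, flags, dtype_is_object), and then stores the __Pyx_TypeInfo
 * pointer directly -- the one argument a pure-Python call could not
 * pass. */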
/* "View.MemoryView":663
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("memoryview_check", 0);
/* "View.MemoryView":664
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o):
* return isinstance(o, memoryview) # <<<<<<<<<<<<<<
*
* cdef tuple _unellipsify(object index, int ndim):
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type);
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "View.MemoryView":663
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
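/* memoryview_check compiles isinstance(o, memoryview) down to a direct
 * __Pyx_TypeCheck against the extension type, with no Python-level
 * call. */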
/* "View.MemoryView":666
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
PyObject *__pyx_v_tup = NULL;
PyObject *__pyx_v_result = NULL;
int __pyx_v_have_slices;
int __pyx_v_seen_ellipsis;
CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_item = NULL;
Py_ssize_t __pyx_v_nslices;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
PyObject *__pyx_t_11 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_unellipsify", 0);
/* "View.MemoryView":671
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
__pyx_t_1 = PyTuple_Check(__pyx_v_index);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":672
* """
* if not isinstance(index, tuple):
* tup = (index,) # <<<<<<<<<<<<<<
* else:
* tup = index
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_index);
__Pyx_GIVEREF(__pyx_v_index);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
__pyx_v_tup = __pyx_t_3;
__pyx_t_3 = 0;
/* "View.MemoryView":671
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":674
* tup = (index,)
* else:
* tup = index # <<<<<<<<<<<<<<
*
* result = []
*/
/*else*/ {
__Pyx_INCREF(__pyx_v_index);
__pyx_v_tup = __pyx_v_index;
}
__pyx_L3:;
/* "View.MemoryView":676
* tup = index
*
* result = [] # <<<<<<<<<<<<<<
* have_slices = False
* seen_ellipsis = False
*/
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_result = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":677
*
* result = []
* have_slices = False # <<<<<<<<<<<<<<
* seen_ellipsis = False
* for idx, item in enumerate(tup):
*/
__pyx_v_have_slices = 0;
/* "View.MemoryView":678
* result = []
* have_slices = False
* seen_ellipsis = False # <<<<<<<<<<<<<<
* for idx, item in enumerate(tup):
* if item is Ellipsis:
*/
__pyx_v_seen_ellipsis = 0;
/* "View.MemoryView":679
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
__Pyx_INCREF(__pyx_int_0);
__pyx_t_3 = __pyx_int_0;
if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) {
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_6)) {
if (likely(PyList_CheckExact(__pyx_t_4))) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
} else {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
}
} else {
__pyx_t_7 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_7)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 679, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_7);
}
__Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
__pyx_t_7 = 0;
__Pyx_INCREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
__pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3);
__pyx_t_3 = __pyx_t_7;
__pyx_t_7 = 0;
/* "View.MemoryView":680
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
__pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":681
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
__pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":682
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error)
__pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__17);
__Pyx_GIVEREF(__pyx_slice__17);
PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__17);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":683
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True # <<<<<<<<<<<<<<
* else:
* result.append(slice(None))
*/
__pyx_v_seen_ellipsis = 1;
/* "View.MemoryView":681
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
goto __pyx_L7;
}
/* "View.MemoryView":685
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__17); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":686
* else:
* result.append(slice(None))
* have_slices = True # <<<<<<<<<<<<<<
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
*/
__pyx_v_have_slices = 1;
/* "View.MemoryView":680
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
goto __pyx_L6;
}
/* "View.MemoryView":688
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
/*else*/ {
__pyx_t_2 = PySlice_Check(__pyx_v_item);
__pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0);
__pyx_t_1 = __pyx_t_10;
__pyx_L9_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":689
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
* raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
*
* have_slices = have_slices or isinstance(item, slice)
*/
__pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_Raise(__pyx_t_11, 0, 0, 0);
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__PYX_ERR(1, 689, __pyx_L1_error)
/* "View.MemoryView":688
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
}
/* "View.MemoryView":691
* raise TypeError("Cannot index with type '%s'" % type(item))
*
* have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
* result.append(item)
*
*/
__pyx_t_10 = (__pyx_v_have_slices != 0);
if (!__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = (__pyx_t_10 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_have_slices = __pyx_t_1;
/* "View.MemoryView":692
*
* have_slices = have_slices or isinstance(item, slice)
* result.append(item) # <<<<<<<<<<<<<<
*
* nslices = ndim - len(result)
*/
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error)
}
__pyx_L6:;
/* "View.MemoryView":679
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":694
* result.append(item)
*
* nslices = ndim - len(result) # <<<<<<<<<<<<<<
* if nslices:
* result.extend([slice(None)] * nslices)
*/
__pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error)
__pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
/* "View.MemoryView":695
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
__pyx_t_1 = (__pyx_v_nslices != 0);
if (__pyx_t_1) {
/* "View.MemoryView":696
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__17);
__Pyx_GIVEREF(__pyx_slice__17);
PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__17);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":695
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
}
/* "View.MemoryView":698
* result.extend([slice(None)] * nslices)
*
* return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
*/
__Pyx_XDECREF(__pyx_r);
if (!__pyx_v_have_slices) {
} else {
__pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_L14_bool_binop_done:;
__pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_11);
__pyx_t_11 = 0;
goto __pyx_L0;
/* "View.MemoryView":666
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
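/* Worked examples for _unellipsify (Python-level semantics, values
 * illustrative):
 *
 *   ndim == 3, index == (Ellipsis, 0):
 *     the Ellipsis expands to ndim - len(tup) + 1 == 2 full slices
 *     before 0 is appended, so the function returns
 *     (True, (slice(None), slice(None), 0)).
 *
 *   ndim == 3, index == 0 (not a tuple):
 *     tup becomes (0,), no slice is seen, and nslices == 2 pads the
 *     result, returning (2, (0, slice(None), slice(None))).
 *
 * The first element is truthy exactly when the expanded index contains
 * a slice; the memoryview __getitem__ code uses it to choose between
 * element access and the memview_slice path below. */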
/* "View.MemoryView":700
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
/* "View.MemoryView":701
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported")
*/
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
__pyx_t_1 = __pyx_t_3;
__pyx_v_suboffset = (__pyx_t_1[0]);
/* "View.MemoryView":702
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
__pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":703
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(1, 703, __pyx_L1_error)
/* "View.MemoryView":702
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
}
}
/* "View.MemoryView":700
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
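/* Per PEP 3118, a suboffset >= 0 marks an indirect dimension, one
 * reached by following a pointer rather than by pure shape/strides
 * arithmetic. Callers that can only handle direct buffers run this
 * check first and propagate the ValueError otherwise. */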
/* "View.MemoryView":710
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":711
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices):
* cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
* cdef bint negative_step
* cdef __Pyx_memviewslice src, dst
*/
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":718
*
*
* memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
*
* cdef _memoryviewslice memviewsliceobj
*/
(void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))));
/* "View.MemoryView":722
* cdef _memoryviewslice memviewsliceobj
*
* assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(1, 722, __pyx_L1_error)
}
}
#endif
/* "View.MemoryView":724
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":725
*
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview # <<<<<<<<<<<<<<
* p_src = &memviewsliceobj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":726
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, &src)
*/
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
/* "View.MemoryView":724
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
goto __pyx_L3;
}
/* "View.MemoryView":728
* p_src = &memviewsliceobj.from_slice
* else:
* slice_copy(memview, &src) # <<<<<<<<<<<<<<
* p_src = &src
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":729
* else:
* slice_copy(memview, &src)
* p_src = &src # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":735
*
*
* dst.memview = p_src.memview # <<<<<<<<<<<<<<
* dst.data = p_src.data
*
*/
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":736
*
* dst.memview = p_src.memview
* dst.data = p_src.data # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":741
*
*
* cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
* cdef int *p_suboffset_dim = &suboffset_dim
* cdef Py_ssize_t start, stop, step
*/
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":742
*
* cdef __Pyx_memviewslice *p_dst = &dst
* cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
* cdef Py_ssize_t start, stop, step
* cdef bint have_start, have_stop, have_step
*/
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":746
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
__pyx_t_6 = 0;
if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_8)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
} else {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
}
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 746, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":747
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
__pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":751
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
* index, 0, 0, # start, stop, step # <<<<<<<<<<<<<<
* 0, 0, 0, # have_{start,stop,step}
* False)
*/
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error)
/* "View.MemoryView":748
* for dim, index in enumerate(indices):
* if PyIndex_Check(index):
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error)
/* "View.MemoryView":747
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
goto __pyx_L6;
}
/* "View.MemoryView":754
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":755
* False)
* elif index is None:
* p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
*/
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":756
* elif index is None:
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1
*/
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":757
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
* new_ndim += 1
* else:
*/
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;
/* "View.MemoryView":758
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1 # <<<<<<<<<<<<<<
* else:
* start = index.start or 0
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
/* "View.MemoryView":754
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
goto __pyx_L6;
}
/* "View.MemoryView":760
* new_ndim += 1
* else:
* start = index.start or 0 # <<<<<<<<<<<<<<
* stop = index.stop or 0
* step = index.step or 0
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L7_bool_binop_done:;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":761
* else:
* start = index.start or 0
* stop = index.stop or 0 # <<<<<<<<<<<<<<
* step = index.step or 0
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L9_bool_binop_done:;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":762
* start = index.start or 0
* stop = index.stop or 0
* step = index.step or 0 # <<<<<<<<<<<<<<
*
* have_start = index.start is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L11_bool_binop_done:;
__pyx_v_step = __pyx_t_10;
/* "View.MemoryView":764
* step = index.step or 0
*
* have_start = index.start is not None # <<<<<<<<<<<<<<
* have_stop = index.stop is not None
* have_step = index.step is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":765
*
* have_start = index.start is not None
* have_stop = index.stop is not None # <<<<<<<<<<<<<<
* have_step = index.step is not None
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":766
* have_start = index.start is not None
* have_stop = index.stop is not None
* have_step = index.step is not None # <<<<<<<<<<<<<<
*
* slice_memviewslice(
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":768
* have_step = index.step is not None
*
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error)
/* "View.MemoryView":774
* have_start, have_stop, have_step,
* True)
* new_ndim += 1 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
/* "View.MemoryView":746
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":776
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":777
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":778
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
* memviewsliceobj.to_dtype_func,
* memview.dtype_is_object)
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) }
/* "View.MemoryView":779
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
* else:
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) }
/* "View.MemoryView":777
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":776
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
}
/* "View.MemoryView":782
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
/*else*/ {
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":783
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":782
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":710
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
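/* memview_slice is the core of __getitem__-style slicing: it copies the
 * source slice header (or borrows the one embedded in a
 * _memoryviewslice), then folds each index into dst one dimension at a
 * time -- an integer index is bounds-checked and drops a dimension,
 * None injects a length-1 dimension with stride 0, and a slice object
 * is decomposed into start/stop/step plus have_* flags for
 * slice_memviewslice. The result is re-wrapped via
 * memoryview_fromslice, preserving the to_object/to_dtype converters
 * when the source carried them. */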
/* "View.MemoryView":807
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
Py_ssize_t __pyx_v_new_shape;
int __pyx_v_negative_step;
int __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":827
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
__pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":829
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
__pyx_t_1 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":830
*
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":829
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
}
/* "View.MemoryView":831
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
__pyx_t_1 = (0 <= __pyx_v_start);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
}
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":832
* start += shape
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<<
* else:
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error)
/* "View.MemoryView":831
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
}
/* "View.MemoryView":827
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
goto __pyx_L3;
}
/* "View.MemoryView":835
* else:
*
* negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<<
*
* if have_step and step == 0:
*/
/*else*/ {
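/* The flag/goto pairs below are Cython's expansion of Python's
 * short-circuiting `and`: the right operand is only evaluated when
 * the left one is true. */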
__pyx_t_1 = ((__pyx_v_have_step != 0) != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step < 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L6_bool_binop_done:;
__pyx_v_negative_step = __pyx_t_2;
/* "View.MemoryView":837
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
__pyx_t_1 = (__pyx_v_have_step != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step == 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L9_bool_binop_done:;
if (__pyx_t_2) {
/* "View.MemoryView":838
*
* if have_step and step == 0:
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error)
/* "View.MemoryView":837
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
}
/* "View.MemoryView":841
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
__pyx_t_2 = (__pyx_v_have_start != 0);
if (__pyx_t_2) {
/* "View.MemoryView":842
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":843
* if have_start:
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if start < 0:
* start = 0
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":844
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":845
* start += shape
* if start < 0:
* start = 0 # <<<<<<<<<<<<<<
* elif start >= shape:
* if negative_step:
*/
__pyx_v_start = 0;
/* "View.MemoryView":844
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
}
/* "View.MemoryView":842
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
goto __pyx_L12;
}
/* "View.MemoryView":846
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
__pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":847
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":848
* elif start >= shape:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = shape
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":847
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L14;
}
/* "View.MemoryView":850
* start = shape - 1
* else:
* start = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
/*else*/ {
__pyx_v_start = __pyx_v_shape;
}
__pyx_L14:;
/* "View.MemoryView":846
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
}
__pyx_L12:;
/* "View.MemoryView":841
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
goto __pyx_L11;
}
/* "View.MemoryView":852
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":853
* else:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = 0
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":852
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L15;
}
/* "View.MemoryView":855
* start = shape - 1
* else:
* start = 0 # <<<<<<<<<<<<<<
*
* if have_stop:
*/
/*else*/ {
__pyx_v_start = 0;
}
__pyx_L15:;
}
__pyx_L11:;
/* "View.MemoryView":857
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
__pyx_t_2 = (__pyx_v_have_stop != 0);
if (__pyx_t_2) {
/* "View.MemoryView":858
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":859
* if have_stop:
* if stop < 0:
* stop += shape # <<<<<<<<<<<<<<
* if stop < 0:
* stop = 0
*/
__pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
/* "View.MemoryView":860
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":861
* stop += shape
* if stop < 0:
* stop = 0 # <<<<<<<<<<<<<<
* elif stop > shape:
* stop = shape
*/
__pyx_v_stop = 0;
/* "View.MemoryView":860
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
}
/* "View.MemoryView":858
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
goto __pyx_L17;
}
/* "View.MemoryView":862
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
__pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":863
* stop = 0
* elif stop > shape:
* stop = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
__pyx_v_stop = __pyx_v_shape;
/* "View.MemoryView":862
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
}
__pyx_L17:;
/* "View.MemoryView":857
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
goto __pyx_L16;
}
/* "View.MemoryView":865
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":866
* else:
* if negative_step:
* stop = -1 # <<<<<<<<<<<<<<
* else:
* stop = shape
*/
__pyx_v_stop = -1L;
/* "View.MemoryView":865
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
goto __pyx_L19;
}
/* "View.MemoryView":868
* stop = -1
* else:
* stop = shape # <<<<<<<<<<<<<<
*
* if not have_step:
*/
/*else*/ {
__pyx_v_stop = __pyx_v_shape;
}
__pyx_L19:;
}
__pyx_L16:;
/* "View.MemoryView":870
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
__pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":871
*
* if not have_step:
* step = 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_step = 1;
/* "View.MemoryView":870
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
}
/* "View.MemoryView":875
*
* with cython.cdivision(True):
* new_shape = (stop - start) // step # <<<<<<<<<<<<<<
*
* if (stop - start) - step * new_shape:
*/
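/* Slice length: C truncating division, then round away from zero
 * when a remainder is left over. Together with the clamp to zero
 * below, this computes ceil((stop - start) / step) bounded at 0,
 * the same length Python assigns to the slice. */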
__pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
/* "View.MemoryView":877
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
__pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":878
*
* if (stop - start) - step * new_shape:
* new_shape += 1 # <<<<<<<<<<<<<<
*
* if new_shape < 0:
*/
__pyx_v_new_shape = (__pyx_v_new_shape + 1);
/* "View.MemoryView":877
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
}
/* "View.MemoryView":880
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
__pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":881
*
* if new_shape < 0:
* new_shape = 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_new_shape = 0;
/* "View.MemoryView":880
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
}
/* "View.MemoryView":884
*
*
* dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<<
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset
*/
(__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
/* "View.MemoryView":885
*
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<<
* dst.suboffsets[new_ndim] = suboffset
*
*/
(__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
/* "View.MemoryView":886
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
}
__pyx_L3:;
/* "View.MemoryView":889
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
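/* Apply the start offset. Before the first indirect (PIL-style)
 * dimension it moves the base data pointer; once an indirect
 * dimension has been seen, the offset belongs inside that indirect
 * block, so it is added to the recorded suboffset instead. */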
__pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":890
*
* if suboffset_dim[0] < 0:
* dst.data += start * stride # <<<<<<<<<<<<<<
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride
*/
__pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
/* "View.MemoryView":889
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
goto __pyx_L23;
}
/* "View.MemoryView":892
* dst.data += start * stride
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<<
*
* if suboffset >= 0:
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_suboffset_dim[0]);
(__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
}
__pyx_L23:;
/* "View.MemoryView":894
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":895
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
__pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":896
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
__pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":897
* if not is_slice:
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<<
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
*/
__pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":896
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
goto __pyx_L26;
}
/* "View.MemoryView":899
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<<
* "must be indexed and not sliced", dim)
* else:
*/
/*else*/ {
/* "View.MemoryView":900
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
* "must be indexed and not sliced", dim) # <<<<<<<<<<<<<<
* else:
* suboffset_dim[0] = new_ndim
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error)
}
__pyx_L26:;
/* "View.MemoryView":895
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
goto __pyx_L25;
}
/* "View.MemoryView":902
* "must be indexed and not sliced", dim)
* else:
* suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<<
*
* return 0
*/
/*else*/ {
(__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
}
__pyx_L25:;
/* "View.MemoryView":894
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
}
/* "View.MemoryView":904
* suboffset_dim[0] = new_ndim
*
* return 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":807
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/* function exit code */
__pyx_L1_error:;
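/* The function can be entered without the GIL, so it must be
 * re-acquired for the traceback machinery before reporting. */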
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
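/*
 * Illustrative sketch, not emitted by Cython: the slice-length
 * arithmetic used above, as a standalone helper. The name is invented
 * for illustration; start/stop are assumed already clamped into
 * [0, shape] as done above, and step must be nonzero (checked by the
 * caller).
 */
static CYTHON_UNUSED Py_ssize_t __pyx_sketch_slice_len(Py_ssize_t start, Py_ssize_t stop, Py_ssize_t step)
{
    Py_ssize_t n = (stop - start) / step;          /* C division truncates */
    if ((stop - start) - step * n != 0)            /* remainder left over? */
        n += 1;                                    /* round away from zero */
    if (n < 0)                                     /* bounds oppose the step: */
        n = 0;                                     /* empty slice */
    return n;                                      /* Python's slice length */
}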
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
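/*
 * Resolves one integer index along dimension `dim` of a Py_buffer to
 * the address of the element: wraps negative indices, bounds-checks,
 * applies the stride, and follows a PIL-style suboffset when the
 * dimension is indirect. A 0-dim buffer gets a synthesized shape and
 * stride from view.len and view.itemsize. Returns NULL after raising
 * IndexError on an out-of-bounds index.
 */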
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_suboffset;
Py_ssize_t __pyx_v_itemsize;
char *__pyx_v_resultp;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("pybuffer_index", 0);
/* "View.MemoryView":912
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
* cdef Py_ssize_t itemsize = view.itemsize
* cdef char *resultp
*/
__pyx_v_suboffset = -1L;
/* "View.MemoryView":913
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
* cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
* cdef char *resultp
*
*/
__pyx_t_1 = __pyx_v_view->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
__pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":917
*
* if view.ndim == 0:
* shape = view.len / itemsize # <<<<<<<<<<<<<<
* stride = itemsize
* else:
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 917, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 917, __pyx_L1_error)
}
__pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize);
/* "View.MemoryView":918
* if view.ndim == 0:
* shape = view.len / itemsize
* stride = itemsize # <<<<<<<<<<<<<<
* else:
* shape = view.shape[dim]
*/
__pyx_v_stride = __pyx_v_itemsize;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
goto __pyx_L3;
}
/* "View.MemoryView":920
* stride = itemsize
* else:
* shape = view.shape[dim] # <<<<<<<<<<<<<<
* stride = view.strides[dim]
* if view.suboffsets != NULL:
*/
/*else*/ {
__pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
/* "View.MemoryView":921
* else:
* shape = view.shape[dim]
* stride = view.strides[dim] # <<<<<<<<<<<<<<
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim]
*/
__pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
__pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":923
* stride = view.strides[dim]
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<<
*
* if index < 0:
*/
__pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
}
}
__pyx_L3:;
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":926
*
* if index < 0:
* index += view.shape[dim] # <<<<<<<<<<<<<<
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*/
__pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":928
* index += view.shape[dim]
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* if index >= shape:
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 928, __pyx_L1_error)
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
}
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":931
*
* if index >= shape:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* resultp = bufp + index * stride
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 931, __pyx_L1_error)
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":933
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* resultp = bufp + index * stride # <<<<<<<<<<<<<<
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset
*/
__pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":935
* resultp = bufp + index * stride
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
*
* return resultp
*/
__pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
}
/* "View.MemoryView":937
* resultp = (<char **> resultp)[0] + suboffset
*
* return resultp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_resultp;
goto __pyx_L0;
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
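/*
 * Illustrative sketch, not emitted by Cython: the per-dimension
 * pointer arithmetic pybuffer_index performs. The helper name is
 * invented for illustration only.
 */
static CYTHON_UNUSED char *__pyx_sketch_lookup(char *bufp, Py_ssize_t index,
                                               Py_ssize_t stride, Py_ssize_t suboffset)
{
    char *p = bufp + index * stride;   /* step to the element */
    if (suboffset >= 0)                /* indirect (PIL-style) dimension: */
        p = *(char **)p + suboffset;   /* dereference, then offset into the block */
    return p;
}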
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
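/*
 * Reverses the slice's shape and strides in place by swapping the
 * i-th and (ndim-1-i)-th entries, which is all a transpose needs for
 * a strided buffer. Indirect (suboffset) dimensions cannot be
 * transposed this way, so they raise ValueError. Returns 1 on
 * success and 0 on error, matching the `nogil except 0` declaration.
 */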
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
int __pyx_v_ndim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_r;
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
long __pyx_t_3;
long __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":944
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
* cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<<
*
* cdef Py_ssize_t *shape = memslice.shape
*/
__pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
__pyx_v_ndim = __pyx_t_1;
/* "View.MemoryView":946
* cdef int ndim = memslice.memview.view.ndim
*
* cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<<
* cdef Py_ssize_t *strides = memslice.strides
*
*/
__pyx_t_2 = __pyx_v_memslice->shape;
__pyx_v_shape = __pyx_t_2;
/* "View.MemoryView":947
*
* cdef Py_ssize_t *shape = memslice.shape
* cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_v_memslice->strides;
__pyx_v_strides = __pyx_t_2;
/* "View.MemoryView":951
*
* cdef int i, j
* for i in range(ndim / 2): # <<<<<<<<<<<<<<
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
*/
__pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2);
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":952
* cdef int i, j
* for i in range(ndim / 2):
* j = ndim - 1 - i # <<<<<<<<<<<<<<
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i]
*/
__pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
/* "View.MemoryView":953
* for i in range(ndim / 2):
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<<
* shape[i], shape[j] = shape[j], shape[i]
*
*/
__pyx_t_5 = (__pyx_v_strides[__pyx_v_j]);
__pyx_t_6 = (__pyx_v_strides[__pyx_v_i]);
(__pyx_v_strides[__pyx_v_i]) = __pyx_t_5;
(__pyx_v_strides[__pyx_v_j]) = __pyx_t_6;
/* "View.MemoryView":954
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<<
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
*/
__pyx_t_6 = (__pyx_v_shape[__pyx_v_j]);
__pyx_t_5 = (__pyx_v_shape[__pyx_v_i]);
(__pyx_v_shape[__pyx_v_i]) = __pyx_t_6;
(__pyx_v_shape[__pyx_v_j]) = __pyx_t_5;
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
if (!__pyx_t_8) {
} else {
__pyx_t_7 = __pyx_t_8;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
__pyx_t_7 = __pyx_t_8;
__pyx_L6_bool_binop_done:;
if (__pyx_t_7) {
/* "View.MemoryView":957
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<<
*
* return 1
*/
__pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error)
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
}
}
/* "View.MemoryView":959
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
* return 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 1;
goto __pyx_L0;
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* Python wrapper */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":977
*
* def __dealloc__(self):
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
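/* Unpacks one element at itemp: prefer the format-specific
 * to_object_func captured when the slice was created, otherwise fall
 * back to the generic unpacking in the memoryview base class. */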
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":981
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL:
* return self.to_object_func(itemp) # <<<<<<<<<<<<<<
* else:
* return memoryview.convert_item_to_object(self, itemp)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
}
/* "View.MemoryView":983
* return self.to_object_func(itemp)
* else:
* return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
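/* Mirror of convert_item_to_object for stores: use the captured
 * to_dtype_func when available (it reports failure by returning 0),
 * otherwise defer to the memoryview base class. */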
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":987
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<<
* else:
* memoryview.assign_item_from_object(self, itemp, value)
*/
__pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error)
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":989
* self.to_dtype_func(itemp, value)
* else:
* memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<<
*
* @property
*/
/*else*/ {
__pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L3:;
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":993
* @property
* def base(self):
* return self.from_object # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->from_object);
__pyx_r = __pyx_v_self->from_object;
goto __pyx_L0;
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
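/*
 * Wraps a C-level __Pyx_memviewslice in a new _memoryviewslice Python
 * object: copies the owner's Py_buffer view, then repoints buf, ndim,
 * shape, strides and suboffsets at the slice's own arrays so the new
 * object describes the sliced geometry rather than the original one.
 */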
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_TypeInfo *__pyx_t_4;
Py_buffer __pyx_t_5;
Py_ssize_t *__pyx_t_6;
Py_ssize_t *__pyx_t_7;
Py_ssize_t *__pyx_t_8;
Py_ssize_t __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_fromslice", 0);
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1008
*
* if <PyObject *> memviewslice.memview == Py_None:
* return None # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
}
/* "View.MemoryView":1013
*
*
* result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<<
*
* result.from_slice = memviewslice
*/
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1015
* result = _memoryviewslice(None, 0, dtype_is_object)
*
* result.from_slice = memviewslice # <<<<<<<<<<<<<<
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
*/
__pyx_v_result->from_slice = __pyx_v_memviewslice;
/* "View.MemoryView":1016
*
* result.from_slice = memviewslice
* __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<<
*
* result.from_object = (<memoryview> memviewslice.memview).base
*/
__PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
/* "View.MemoryView":1018
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
* result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<<
* result.typeinfo = memviewslice.memview.typeinfo
*
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_result->from_object);
__Pyx_DECREF(__pyx_v_result->from_object);
__pyx_v_result->from_object = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":1019
*
* result.from_object = (<memoryview> memviewslice.memview).base
* result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<<
*
* result.view = memviewslice.memview.view
*/
__pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
__pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
/* "View.MemoryView":1021
* result.typeinfo = memviewslice.memview.typeinfo
*
* result.view = memviewslice.memview.view # <<<<<<<<<<<<<<
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
*/
__pyx_t_5 = __pyx_v_memviewslice.memview->view;
__pyx_v_result->__pyx_base.view = __pyx_t_5;
/* "View.MemoryView":1022
*
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<<
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
*/
__pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
/* "View.MemoryView":1023
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
/* "View.MemoryView":1024
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
/* "View.MemoryView":1025
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
*/
Py_INCREF(Py_None);
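/* Export writable records only when the underlying memoryview is
 * itself writable; a read-only source must advertise the read-only
 * variant of the buffer contract. */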
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
__pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1028
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
* result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<<
* else:
* result.flags = PyBUF_RECORDS_RO
*/
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
goto __pyx_L4;
}
/* "View.MemoryView":1030
* result.flags = PyBUF_RECORDS
* else:
* result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<<
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
*/
/*else*/ {
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO;
}
__pyx_L4:;
/* "View.MemoryView":1032
* result.flags = PyBUF_RECORDS_RO
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<<
* result.view.strides = <Py_ssize_t *> result.from_slice.strides
*
*/
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
/* "View.MemoryView":1033
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
* result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
/* "View.MemoryView":1036
*
*
* result.view.suboffsets = NULL # <<<<<<<<<<<<<<
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
*/
__pyx_v_result->__pyx_base.view.suboffsets = NULL;
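/* The buffer protocol expects suboffsets == NULL when no dimension
 * is indirect, so the array is only re-exposed if some entry in the
 * slice's suboffsets is >= 0. */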
/* "View.MemoryView":1037
*
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
*/
__pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_v_suboffset = (__pyx_t_6[0]);
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
__pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1039
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
/* "View.MemoryView":1040
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break # <<<<<<<<<<<<<<
*
* result.view.len = result.view.itemsize
*/
goto __pyx_L6_break;
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
}
}
__pyx_L6_break:;
/* "View.MemoryView":1042
* break
*
* result.view.len = result.view.itemsize # <<<<<<<<<<<<<<
* for length in result.view.shape[:ndim]:
* result.view.len *= length
*/
__pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
/* "View.MemoryView":1043
*
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<<
* result.view.len *= length
*
*/
__pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1044
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]:
* result.view.len *= length # <<<<<<<<<<<<<<
*
* result.to_object_func = to_object_func
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
}
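/* view.len now holds itemsize multiplied by every extent in shape:
 * the total size of the exposed memory in bytes. */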
/* "View.MemoryView":1046
* result.view.len *= length
*
* result.to_object_func = to_object_func # <<<<<<<<<<<<<<
* result.to_dtype_func = to_dtype_func
*
*/
__pyx_v_result->to_object_func = __pyx_v_to_object_func;
/* "View.MemoryView":1047
*
* result.to_object_func = to_object_func
* result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
/* "View.MemoryView":1049
* result.to_dtype_func = to_dtype_func
*
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1052
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
*/
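/*
 * Fast path: a _memoryviewslice already owns a __Pyx_memviewslice, so
 * a pointer into the object is returned directly. For a plain
 * memoryview, the caller-provided *mslice is filled via slice_copy
 * and returned instead.
 */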
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
__Pyx_memviewslice *__pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_slice_from_memview", 0);
/* "View.MemoryView":1055
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1056
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice):
* obj = memview # <<<<<<<<<<<<<<
* return &obj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":1057
* if isinstance(memview, _memoryviewslice):
* obj = memview
* return &obj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, mslice)
*/
__pyx_r = (&__pyx_v_obj->from_slice);
goto __pyx_L0;
/* "View.MemoryView":1055
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
}
/* "View.MemoryView":1059
* return &obj.from_slice
* else:
* slice_copy(memview, mslice) # <<<<<<<<<<<<<<
* return mslice
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
/* "View.MemoryView":1060
* else:
* slice_copy(memview, mslice)
* return mslice # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_slice_copy')
*/
__pyx_r = __pyx_v_mslice;
goto __pyx_L0;
}
/* "View.MemoryView":1052
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_obj);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
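/*
 * Flattens a memoryview's buffer geometry into *dst: records the
 * owner and base pointer, then copies shape/strides per dimension,
 * substituting -1 for suboffsets when the buffer has none.
 */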
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
int __pyx_v_dim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
Py_ssize_t *__pyx_v_suboffsets;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
__Pyx_RefNannySetupContext("slice_copy", 0);
/* "View.MemoryView":1067
* cdef (Py_ssize_t*) shape, strides, suboffsets
*
* shape = memview.view.shape # <<<<<<<<<<<<<<
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets
*/
__pyx_t_1 = __pyx_v_memview->view.shape;
__pyx_v_shape = __pyx_t_1;
/* "View.MemoryView":1068
*
* shape = memview.view.shape
* strides = memview.view.strides # <<<<<<<<<<<<<<
* suboffsets = memview.view.suboffsets
*
*/
__pyx_t_1 = __pyx_v_memview->view.strides;
__pyx_v_strides = __pyx_t_1;
/* "View.MemoryView":1069
* shape = memview.view.shape
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<<
*
* dst.memview = <__pyx_memoryview *> memview
*/
__pyx_t_1 = __pyx_v_memview->view.suboffsets;
__pyx_v_suboffsets = __pyx_t_1;
/* "View.MemoryView":1071
* suboffsets = memview.view.suboffsets
*
* dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<<
* dst.data = <char *> memview.view.buf
*
*/
__pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
/* "View.MemoryView":1072
*
* dst.memview = <__pyx_memoryview *> memview
* dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<<
*
* for dim in range(memview.view.ndim):
*/
__pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);
/* "View.MemoryView":1074
* dst.data = <char *> memview.view.buf
*
* for dim in range(memview.view.ndim): # <<<<<<<<<<<<<<
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
*/
__pyx_t_2 = __pyx_v_memview->view.ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_dim = __pyx_t_4;
/* "View.MemoryView":1075
*
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<<
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*/
(__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]);
/* "View.MemoryView":1076
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<<
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*
*/
(__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]);
/* "View.MemoryView":1077
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object')
*/
if ((__pyx_v_suboffsets != 0)) {
__pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]);
} else {
__pyx_t_5 = -1L;
}
(__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5;
}
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":1080
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
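/* Snapshot the memoryview's geometry into a stack slice, then build
 * a fresh Python-level copy from it. */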
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
__Pyx_memviewslice __pyx_v_memviewslice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy", 0);
/* "View.MemoryView":1083
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<<
* return memoryview_copy_from_slice(memview, &memviewslice)
*
*/
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
/* "View.MemoryView":1084
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice)
* return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object_from_slice')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":1080
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
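/* memoryview_copy_from_slice: build a new memoryview object from an
   existing memoryview plus a C-level slice. When the source is a
   _memoryviewslice, its to_object_func/to_dtype_func converters are
   propagated to the new object; otherwise both are left NULL. */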
/* "View.MemoryView":1087
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
PyObject *(*__pyx_v_to_object_func)(char *);
int (*__pyx_v_to_dtype_func)(char *, PyObject *);
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *(*__pyx_t_3)(char *);
int (*__pyx_t_4)(char *, PyObject *);
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
/* "View.MemoryView":1094
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1095
*
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<<
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
*/
__pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
__pyx_v_to_object_func = __pyx_t_3;
/* "View.MemoryView":1096
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<<
* else:
* to_object_func = NULL
*/
__pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
__pyx_v_to_dtype_func = __pyx_t_4;
/* "View.MemoryView":1094
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
goto __pyx_L3;
}
/* "View.MemoryView":1098
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
* to_object_func = NULL # <<<<<<<<<<<<<<
* to_dtype_func = NULL
*
*/
/*else*/ {
__pyx_v_to_object_func = NULL;
/* "View.MemoryView":1099
* else:
* to_object_func = NULL
* to_dtype_func = NULL # <<<<<<<<<<<<<<
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
*/
__pyx_v_to_dtype_func = NULL;
}
__pyx_L3:;
/* "View.MemoryView":1101
* to_dtype_func = NULL
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<<
* to_object_func, to_dtype_func,
* memview.dtype_is_object)
*/
__Pyx_XDECREF(__pyx_r);
/* "View.MemoryView":1103
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
* to_object_func, to_dtype_func,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":1087
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
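/* abs_py_ssize_t: absolute value of a Py_ssize_t; a nogil helper used
   below when comparing stride magnitudes. */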
/* "View.MemoryView":1109
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
Py_ssize_t __pyx_r;
int __pyx_t_1;
/* "View.MemoryView":1110
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
__pyx_t_1 = ((__pyx_v_arg < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1111
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0:
* return -arg # <<<<<<<<<<<<<<
* else:
* return arg
*/
__pyx_r = (-__pyx_v_arg);
goto __pyx_L0;
/* "View.MemoryView":1110
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
}
/* "View.MemoryView":1113
* return -arg
* else:
* return arg # <<<<<<<<<<<<<<
*
* @cname('__pyx_get_best_slice_order')
*/
/*else*/ {
__pyx_r = __pyx_v_arg;
goto __pyx_L0;
}
/* "View.MemoryView":1109
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
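/* get_best_order: pick 'C' or 'F' as the preferred memory access order
   for a slice. c_stride is the stride of the innermost dimension with
   extent > 1, f_stride that of the outermost such dimension; the order
   whose stride has the smaller magnitude wins, with ties going to 'C'. */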
/* "View.MemoryView":1116
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
int __pyx_v_i;
Py_ssize_t __pyx_v_c_stride;
Py_ssize_t __pyx_v_f_stride;
char __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1121
* """
* cdef int i
* cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<<
* cdef Py_ssize_t f_stride = 0
*
*/
__pyx_v_c_stride = 0;
/* "View.MemoryView":1122
* cdef int i
* cdef Py_ssize_t c_stride = 0
* cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_f_stride = 0;
/* "View.MemoryView":1124
* cdef Py_ssize_t f_stride = 0
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1125
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1126
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1127
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
goto __pyx_L4_break;
/* "View.MemoryView":1125
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L4_break:;
/* "View.MemoryView":1129
* break
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
*/
__pyx_t_1 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_1;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1130
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1131
* for i in range(ndim):
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1132
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
*/
goto __pyx_L7_break;
/* "View.MemoryView":1130
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L7_break:;
/* "View.MemoryView":1134
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
__pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1135
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
* return 'C' # <<<<<<<<<<<<<<
* else:
* return 'F'
*/
__pyx_r = 'C';
goto __pyx_L0;
/* "View.MemoryView":1134
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
}
/* "View.MemoryView":1137
* return 'C'
* else:
* return 'F' # <<<<<<<<<<<<<<
*
* @cython.cdivision(True)
*/
/*else*/ {
__pyx_r = 'F';
goto __pyx_L0;
}
/* "View.MemoryView":1116
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
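/* _copy_strided_to_strided: recursive element copy between two strided
   buffers. The ndim == 1 base case collapses to a single memcpy when
   both strides are positive and equal to the itemsize (both sides
   contiguous); otherwise it memcpy's one item at a time, advancing each
   pointer by its stride. Higher dimensions recurse on the trailing
   shape/stride arrays. */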
/* "View.MemoryView":1140
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
Py_ssize_t __pyx_v_dst_extent;
Py_ssize_t __pyx_v_src_stride;
Py_ssize_t __pyx_v_dst_stride;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
/* "View.MemoryView":1147
*
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
*/
__pyx_v_src_extent = (__pyx_v_src_shape[0]);
/* "View.MemoryView":1148
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0]
*/
__pyx_v_dst_extent = (__pyx_v_dst_shape[0]);
/* "View.MemoryView":1149
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
*/
__pyx_v_src_stride = (__pyx_v_src_strides[0]);
/* "View.MemoryView":1150
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_dst_stride = (__pyx_v_dst_strides[0]);
/* "View.MemoryView":1152
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
__pyx_t_2 = ((__pyx_v_src_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
/* "View.MemoryView":1154
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
*/
__pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
if (__pyx_t_2) {
__pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
}
__pyx_t_3 = (__pyx_t_2 != 0);
__pyx_t_1 = __pyx_t_3;
__pyx_L5_bool_binop_done:;
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
if (__pyx_t_1) {
/* "View.MemoryView":1155
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)));
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
goto __pyx_L4;
}
/* "View.MemoryView":1157
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1158
* else:
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<<
* src_data += src_stride
* dst_data += dst_stride
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize));
/* "View.MemoryView":1159
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
* else:
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1160
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L4:;
/* "View.MemoryView":1152
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
goto __pyx_L3;
}
/* "View.MemoryView":1162
* dst_data += dst_stride
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* _copy_strided_to_strided(src_data, src_strides + 1,
* dst_data, dst_strides + 1,
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1163
* else:
* for i in range(dst_extent):
* _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<<
* dst_data, dst_strides + 1,
* src_shape + 1, dst_shape + 1,
*/
_copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize);
/* "View.MemoryView":1167
* src_shape + 1, dst_shape + 1,
* ndim - 1, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
*
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1168
* ndim - 1, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src,
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1140
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
/* function exit code */
}
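/* copy_strided_to_strided: thin wrapper that unpacks the data, strides,
   and shape fields of the two slices and forwards to the recursive
   helper above. */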
/* "View.MemoryView":1170
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
/* "View.MemoryView":1173
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
* _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<<
* src.shape, dst.shape, ndim, itemsize)
*
*/
_copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1170
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
/* function exit code */
}
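/* slice_get_size: number of bytes occupied by the slice's elements,
   i.e. the itemsize multiplied by the product of the first `ndim`
   extents. */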
/* "View.MemoryView":1177
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_r;
Py_ssize_t __pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
/* "View.MemoryView":1179
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil:
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<<
*
* for shape in src.shape[:ndim]:
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_size = __pyx_t_1;
/* "View.MemoryView":1181
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*
* for shape in src.shape[:ndim]: # <<<<<<<<<<<<<<
* size *= shape
*
*/
__pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim);
for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_shape = (__pyx_t_2[0]);
/* "View.MemoryView":1182
*
* for shape in src.shape[:ndim]:
* size *= shape # <<<<<<<<<<<<<<
*
* return size
*/
__pyx_v_size = (__pyx_v_size * __pyx_v_shape);
}
/* "View.MemoryView":1184
* size *= shape
*
* return size # <<<<<<<<<<<<<<
*
* @cname('__pyx_fill_contig_strides_array')
*/
__pyx_r = __pyx_v_size;
goto __pyx_L0;
/* "View.MemoryView":1177
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
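/* fill_contig_strides_array: fill `strides` for a contiguous array of
   the given shape, walking the dimensions forward for Fortran ('F')
   order and backward for C order. Returns the accumulated stride,
   which is the total size of the array in bytes. */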
/* "View.MemoryView":1187
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
int __pyx_v_idx;
Py_ssize_t __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1196
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
__pyx_t_1 = ((__pyx_v_order == 'F') != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1197
*
* if order == 'F':
* for idx in range(ndim): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride *= shape[idx]
*/
__pyx_t_2 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_idx = __pyx_t_4;
/* "View.MemoryView":1198
* if order == 'F':
* for idx in range(ndim):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride *= shape[idx]
* else:
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1199
* for idx in range(ndim):
* strides[idx] = stride
* stride *= shape[idx] # <<<<<<<<<<<<<<
* else:
* for idx in range(ndim - 1, -1, -1):
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
/* "View.MemoryView":1196
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
goto __pyx_L3;
}
/* "View.MemoryView":1201
* stride *= shape[idx]
* else:
* for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride *= shape[idx]
*/
/*else*/ {
for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) {
__pyx_v_idx = __pyx_t_2;
/* "View.MemoryView":1202
* else:
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride *= shape[idx]
*
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1203
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride
* stride *= shape[idx] # <<<<<<<<<<<<<<
*
* return stride
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
}
__pyx_L3:;
/* "View.MemoryView":1205
* stride *= shape[idx]
*
* return stride # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_data_to_temp')
*/
__pyx_r = __pyx_v_stride;
goto __pyx_L0;
/* "View.MemoryView":1187
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
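/* copy_data_to_temp: malloc a contiguous buffer large enough for `src`,
   initialize `tmpslice` to describe it (contiguous strides in the
   requested order, suboffsets disabled, extent-1 dimensions given
   stride 0), and copy the data over -- a single memcpy when `src` is
   already contiguous in that order, a strided copy otherwise. Returns
   the malloc'd pointer, which the caller must free, or NULL after
   raising MemoryError. */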
/* "View.MemoryView":1208
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
int __pyx_v_i;
void *__pyx_v_result;
size_t __pyx_v_itemsize;
size_t __pyx_v_size;
void *__pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
struct __pyx_memoryview_obj *__pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":1219
* cdef void *result
*
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef size_t size = slice_get_size(src, ndim)
*
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1220
*
* cdef size_t itemsize = src.memview.view.itemsize
* cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<<
*
* result = malloc(size)
*/
__pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
/* "View.MemoryView":1222
* cdef size_t size = slice_get_size(src, ndim)
*
* result = malloc(size) # <<<<<<<<<<<<<<
* if not result:
* _err(MemoryError, NULL)
*/
__pyx_v_result = malloc(__pyx_v_size);
/* "View.MemoryView":1223
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
__pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1224
* result = malloc(size)
* if not result:
* _err(MemoryError, NULL) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error)
/* "View.MemoryView":1223
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
}
/* "View.MemoryView":1227
*
*
* tmpslice.data = <char *> result # <<<<<<<<<<<<<<
* tmpslice.memview = src.memview
* for i in range(ndim):
*/
__pyx_v_tmpslice->data = ((char *)__pyx_v_result);
/* "View.MemoryView":1228
*
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview # <<<<<<<<<<<<<<
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
*/
__pyx_t_4 = __pyx_v_src->memview;
__pyx_v_tmpslice->memview = __pyx_t_4;
/* "View.MemoryView":1229
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview
* for i in range(ndim): # <<<<<<<<<<<<<<
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1230
* tmpslice.memview = src.memview
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<<
* tmpslice.suboffsets[i] = -1
*
*/
(__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
/* "View.MemoryView":1231
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,
*/
(__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1233
* tmpslice.suboffsets[i] = -1
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<<
* ndim, order)
*
*/
(void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));
/* "View.MemoryView":1237
*
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1238
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
__pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1239
* for i in range(ndim):
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0 # <<<<<<<<<<<<<<
*
* if slice_is_contig(src[0], order, ndim):
*/
(__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1238
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
}
}
/* "View.MemoryView":1241
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1242
*
* if slice_is_contig(src[0], order, ndim):
* memcpy(result, src.data, size) # <<<<<<<<<<<<<<
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*/
(void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size));
/* "View.MemoryView":1241
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":1244
* memcpy(result, src.data, size)
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<<
*
* return result
*/
/*else*/ {
copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
}
__pyx_L9:;
/* "View.MemoryView":1246
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":1208
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = NULL;
__pyx_L0:;
return __pyx_r;
}
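/* _err_extents: raise ValueError (acquiring the GIL) for a shape
   mismatch in dimension `i`, reporting both extents; returns -1 so the
   error can propagate out of nogil code. */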
/* "View.MemoryView":1251
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_extents", 0);
/* "View.MemoryView":1254
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
* (i, extent1, extent2)) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err_dim')
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":1253
* cdef int _err_extents(int i, Py_ssize_t extent1,
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<<
* (i, extent1, extent2))
*
*/
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 1253, __pyx_L1_error)
/* "View.MemoryView":1251
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
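/* _err_dim: raise the given error class (acquiring the GIL) with an
   ASCII message formatted with the offending dimension; returns -1 on
   the error path. */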
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_dim", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1258
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
* raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err')
*/
__pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_INCREF(__pyx_v_error);
__pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 1258, __pyx_L1_error)
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
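/* _err: raise the given error (acquiring the GIL), with a decoded
   ASCII message when `msg` is non-NULL and bare otherwise; returns -1
   on the error path. */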
/* "View.MemoryView":1261
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1262
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
__pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":1263
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL:
* raise error(msg.decode('ascii')) # <<<<<<<<<<<<<<
* else:
* raise error
*/
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_error);
__pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
__pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 1263, __pyx_L1_error)
/* "View.MemoryView":1262
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
}
/* "View.MemoryView":1265
* raise error(msg.decode('ascii'))
* else:
* raise error # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_contents')
*/
/*else*/ {
__Pyx_Raise(__pyx_v_error, 0, 0, 0);
__PYX_ERR(1, 1265, __pyx_L1_error)
}
/* "View.MemoryView":1261
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
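/* memoryview_copy_contents: copy the contents of slice `src` into
   slice `dst`. Leading dimensions are broadcast so both slices share
   an ndim, extents are validated (extent-1 source dimensions broadcast
   via stride 0), and indirect dimensions are rejected. Overlapping
   memory is handled by first copying `src` into a temporary contiguous
   buffer. When no broadcasting occurred and both slices are contiguous
   in the same order, one memcpy suffices; otherwise both slices are
   transposed if Fortran order is best for both, and a strided
   element-wise copy runs with object refcounts adjusted around the
   overwrite. Returns 0 on success, -1 on error. */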
/* "View.MemoryView":1268
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
void *__pyx_v_tmpdata;
size_t __pyx_v_itemsize;
int __pyx_v_i;
char __pyx_v_order;
int __pyx_v_broadcasting;
int __pyx_v_direct_copy;
__Pyx_memviewslice __pyx_v_tmp;
int __pyx_v_ndim;
int __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
void *__pyx_t_7;
int __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":1276
* Check for overlapping memory and verify the shapes.
* """
* cdef void *tmpdata = NULL # <<<<<<<<<<<<<<
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
*/
__pyx_v_tmpdata = NULL;
/* "View.MemoryView":1277
* """
* cdef void *tmpdata = NULL
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
*/
__pyx_t_1 = __pyx_v_src.memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1279
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
* cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<<
* cdef bint broadcasting = False
* cdef bint direct_copy = False
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
/* "View.MemoryView":1280
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False # <<<<<<<<<<<<<<
* cdef bint direct_copy = False
* cdef __Pyx_memviewslice tmp
*/
__pyx_v_broadcasting = 0;
/* "View.MemoryView":1281
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False
* cdef bint direct_copy = False # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice tmp
*
*/
__pyx_v_direct_copy = 0;
/* "View.MemoryView":1284
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
__pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1285
*
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<<
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
/* "View.MemoryView":1284
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
goto __pyx_L3;
}
/* "View.MemoryView":1286
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
__pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1287
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<<
*
* cdef int ndim = max(src_ndim, dst_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
/* "View.MemoryView":1286
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
}
__pyx_L3:;
/* "View.MemoryView":1289
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
* cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
__pyx_t_3 = __pyx_v_dst_ndim;
__pyx_t_4 = __pyx_v_src_ndim;
if (((__pyx_t_3 > __pyx_t_4) != 0)) {
__pyx_t_5 = __pyx_t_3;
} else {
__pyx_t_5 = __pyx_t_4;
}
__pyx_v_ndim = __pyx_t_5;
/* "View.MemoryView":1291
* cdef int ndim = max(src_ndim, dst_ndim)
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
*/
__pyx_t_5 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_5;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1292
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1293
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1294
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
* broadcasting = True # <<<<<<<<<<<<<<
* src.strides[i] = 0
* else:
*/
__pyx_v_broadcasting = 1;
/* "View.MemoryView":1295
* if src.shape[i] == 1:
* broadcasting = True
* src.strides[i] = 0 # <<<<<<<<<<<<<<
* else:
* _err_extents(i, dst.shape[i], src.shape[i])
*/
(__pyx_v_src.strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1293
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
goto __pyx_L7;
}
/* "View.MemoryView":1297
* src.strides[i] = 0
* else:
* _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<<
*
* if src.suboffsets[i] >= 0:
*/
/*else*/ {
__pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":1292
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
}
/* "View.MemoryView":1299
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
__pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1300
*
* if src.suboffsets[i] >= 0:
* _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<<
*
* if slices_overlap(&src, &dst, ndim, itemsize):
*/
__pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error)
/* "View.MemoryView":1299
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
}
}
/* "View.MemoryView":1302
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
__pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1304
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
__pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1305
*
* if not slice_is_contig(src, order, ndim):
* order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<<
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
/* "View.MemoryView":1304
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
}
/* "View.MemoryView":1307
* order = get_best_order(&dst, ndim)
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<<
* src = tmp
*
*/
__pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1307, __pyx_L1_error)
__pyx_v_tmpdata = __pyx_t_7;
/* "View.MemoryView":1308
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
* src = tmp # <<<<<<<<<<<<<<
*
* if not broadcasting:
*/
__pyx_v_src = __pyx_v_tmp;
/* "View.MemoryView":1302
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
}
/* "View.MemoryView":1310
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1313
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1314
*
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<<
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim)
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim);
/* "View.MemoryView":1313
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
goto __pyx_L12;
}
/* "View.MemoryView":1315
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1316
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<<
*
* if direct_copy:
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim);
/* "View.MemoryView":1315
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
}
__pyx_L12:;
/* "View.MemoryView":1318
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_2 = (__pyx_v_direct_copy != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1320
* if direct_copy:
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1321
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
*/
(void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)));
/* "View.MemoryView":1322
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
* free(tmpdata)
* return 0
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1323
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1324
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* if order == 'F' == get_best_order(&dst, ndim):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1318
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
}
/* "View.MemoryView":1310
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1326
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = (__pyx_v_order == 'F');
if (__pyx_t_2) {
__pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
}
__pyx_t_8 = (__pyx_t_2 != 0);
if (__pyx_t_8) {
/* "View.MemoryView":1329
*
*
* transpose_memslice(&src) # <<<<<<<<<<<<<<
* transpose_memslice(&dst)
*
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error)
/* "View.MemoryView":1330
*
* transpose_memslice(&src)
* transpose_memslice(&dst) # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error)
/* "View.MemoryView":1326
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1332
* transpose_memslice(&dst)
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1333
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
*/
copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1334
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
* free(tmpdata)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1336
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1337
*
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_broadcast_leading')
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1268
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
free(__pyx_v_tmpdata); /* tmpdata is NULL unless copy_data_to_temp succeeded; free it here so a later failure (e.g. in transpose_memslice) does not leak the temp buffer */
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
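/* broadcast_leading: prepend broadcast dimensions to a slice in place.
   Existing shape/strides/suboffsets entries are shifted toward the end
   by `offset = ndim_other - ndim`, and the new leading dimensions get
   extent 1, the stride of dimension 0, and suboffset -1. */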
/* "View.MemoryView":1340
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
int __pyx_v_i;
int __pyx_v_offset;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":1344
* int ndim_other) nogil:
* cdef int i
* cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim);
/* "View.MemoryView":1346
* cdef int offset = ndim_other - ndim
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1347
*
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<<
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*/
(__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]);
/* "View.MemoryView":1348
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<<
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
*/
(__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1349
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<<
*
* for i in range(offset):
*/
(__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]);
}
/* "View.MemoryView":1351
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
* for i in range(offset): # <<<<<<<<<<<<<<
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
*/
__pyx_t_1 = __pyx_v_offset;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1352
*
* for i in range(offset):
* mslice.shape[i] = 1 # <<<<<<<<<<<<<<
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1
*/
(__pyx_v_mslice->shape[__pyx_v_i]) = 1;
/* "View.MemoryView":1353
* for i in range(offset):
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<<
* mslice.suboffsets[i] = -1
*
*/
(__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]);
/* "View.MemoryView":1354
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1340
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
/* function exit code */
}
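/* refcount_copying: when the dtype is object, walk every element of
   `dst` and adjust its refcount -- the callers above pass inc=False
   before overwriting the data (decref the old objects) and inc=True
   afterwards (incref the new ones). A no-op for non-object dtypes. */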
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
int __pyx_t_1;
/* "View.MemoryView":1366
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
__pyx_t_1 = (__pyx_v_dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1367
*
* if dtype_is_object:
* refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<<
* dst.strides, ndim, inc)
*
*/
__pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1366
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
}
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
/* function exit code */
}
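/* refcount_objects_in_slice_with_gil: GIL-acquiring wrapper around the
   recursive refcounting walk below, so it can be called from nogil
   code. */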
/* "View.MemoryView":1371
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
__Pyx_RefNannyDeclarations
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);
/* "View.MemoryView":1374
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
* refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1371
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
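/* refcount_objects_in_slice: recursive walker over an ndim-dimensional
 * slice. At ndim == 1 each element is a PyObject* whose refcount is
 * adjusted (Py_INCREF or Py_DECREF depending on `inc`); otherwise it
 * recurses into the next dimension with shape+1/strides+1, advancing
 * `data` by strides[0] after each iteration. */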
/* "View.MemoryView":1377
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
/* "View.MemoryView":1381
* cdef Py_ssize_t i
*
* for i in range(shape[0]): # <<<<<<<<<<<<<<
* if ndim == 1:
* if inc:
*/
__pyx_t_1 = (__pyx_v_shape[0]);
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1382
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
__pyx_t_4 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1383
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
__pyx_t_4 = (__pyx_v_inc != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1384
* if ndim == 1:
* if inc:
* Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* Py_DECREF((<PyObject **> data)[0])
*/
Py_INCREF((((PyObject **)__pyx_v_data)[0]));
/* "View.MemoryView":1383
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":1386
* Py_INCREF((<PyObject **> data)[0])
* else:
* Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
*/
/*else*/ {
Py_DECREF((((PyObject **)__pyx_v_data)[0]));
}
__pyx_L6:;
/* "View.MemoryView":1382
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
goto __pyx_L5;
}
/* "View.MemoryView":1388
* Py_DECREF((<PyObject **> data)[0])
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, inc)
*
*/
/*else*/ {
/* "View.MemoryView":1389
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
* ndim - 1, inc) # <<<<<<<<<<<<<<
*
* data += strides[0]
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
}
__pyx_L5:;
/* "View.MemoryView":1391
* ndim - 1, inc)
*
* data += strides[0] # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0]));
}
/* "View.MemoryView":1377
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
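/* slice_assign_scalar: fills a slice with a single scalar `item`. For
 * object dtypes the existing elements are DECREF'd first (refcount_copying
 * with inc=False), the raw bytes are copied in, and the newly written
 * references are then INCREF'd (inc=True). */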
/* "View.MemoryView":1397
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
/* "View.MemoryView":1400
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1401
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False)
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<<
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1403
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
*
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1397
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
/* function exit code */
}
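/* _slice_assign_scalar: the nogil recursive core of slice_assign_scalar.
 * At ndim == 1 it memcpy's `itemsize` bytes of `item` into each of the
 * `extent` elements, stepping by `stride`; for higher dimensions it
 * recurses with shape+1/strides+1 and ndim-1. */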
/* "View.MemoryView":1407
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_extent;
int __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
Py_ssize_t __pyx_t_4;
/* "View.MemoryView":1411
* size_t itemsize, void *item) nogil:
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t extent = shape[0]
*
*/
__pyx_v_stride = (__pyx_v_strides[0]);
/* "View.MemoryView":1412
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0]
* cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_extent = (__pyx_v_shape[0]);
/* "View.MemoryView":1414
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1415
*
* if ndim == 1:
* for i in range(extent): # <<<<<<<<<<<<<<
* memcpy(data, item, itemsize)
* data += stride
*/
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1416
* if ndim == 1:
* for i in range(extent):
* memcpy(data, item, itemsize) # <<<<<<<<<<<<<<
* data += stride
* else:
*/
(void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize));
/* "View.MemoryView":1417
* for i in range(extent):
* memcpy(data, item, itemsize)
* data += stride # <<<<<<<<<<<<<<
* else:
* for i in range(extent):
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
/* "View.MemoryView":1414
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
goto __pyx_L3;
}
/* "View.MemoryView":1419
* data += stride
* else:
* for i in range(extent): # <<<<<<<<<<<<<<
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
*/
/*else*/ {
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1420
* else:
* for i in range(extent):
* _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, itemsize, item)
* data += stride
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1422
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
* data += stride # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1407
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/* function exit code */
}
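/* __pyx_unpickle_Enum (Python wrapper): parses the three arguments
 * (__pyx_type, __pyx_checksum, __pyx_state), accepted positionally or by
 * keyword, converts the checksum to a C long, and dispatches to the
 * implementation below. */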
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
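/* __pyx_unpickle_Enum (implementation): verifies the layout checksum
 * 0xb068931, raising pickle.PickleError on mismatch; otherwise allocates
 * the result via Enum.__new__(__pyx_type) and, if a state tuple was
 * supplied, applies it through __pyx_unpickle_Enum__set_state before
 * returning the new instance. */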
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0);
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<<
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
*/
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 6, __pyx_L1_error)
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
}
/* "(tree fragment)":7
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<<
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
}
/* "(tree fragment)":10
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result # <<<<<<<<<<<<<<
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
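/* __pyx_unpickle_Enum__set_state: restores the instance from the pickled
 * state tuple -- `name` is taken from state[0], and when the tuple carries
 * a second entry and the object has a __dict__, state[1] is merged in via
 * __dict__.update(). */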
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0);
/* "(tree fragment)":12
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->name);
__Pyx_DECREF(__pyx_v___pyx_result->name);
__pyx_v___pyx_result->name = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 13, __pyx_L1_error)
}
__pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_4 = ((__pyx_t_3 > 1) != 0);
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_5 = (__pyx_t_4 != 0);
__pyx_t_2 = __pyx_t_5;
__pyx_L4_bool_binop_done:;
if (__pyx_t_2) {
/* "(tree fragment)":14
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<<
*/
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 14, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
}
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
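/* Type scaffolding for kernel_prob_reshaping.KernelReshaper: tp_new
 * installs the vtable and initialises the two PyArrayObject* fields
 * (np_recomputed_probs, np_all_distances) to Py_None; tp_dealloc,
 * tp_traverse and tp_clear cooperate with the cycle collector by
 * untracking, visiting and clearing those fields. */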
static struct __pyx_vtabstruct_21kernel_prob_reshaping_KernelReshaper __pyx_vtable_21kernel_prob_reshaping_KernelReshaper;
static PyObject *__pyx_tp_new_21kernel_prob_reshaping_KernelReshaper(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *)o);
p->__pyx_vtab = __pyx_vtabptr_21kernel_prob_reshaping_KernelReshaper;
p->np_recomputed_probs = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
p->np_all_distances = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_21kernel_prob_reshaping_KernelReshaper(PyObject *o) {
struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *p = (struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->np_recomputed_probs);
Py_CLEAR(p->np_all_distances);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_21kernel_prob_reshaping_KernelReshaper(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *p = (struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *)o;
if (p->np_recomputed_probs) {
e = (*v)(((PyObject *)p->np_recomputed_probs), a); if (e) return e;
}
if (p->np_all_distances) {
e = (*v)(((PyObject *)p->np_all_distances), a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_21kernel_prob_reshaping_KernelReshaper(PyObject *o) {
PyObject* tmp;
struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *p = (struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *)o;
tmp = ((PyObject*)p->np_recomputed_probs);
p->np_recomputed_probs = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->np_all_distances);
p->np_all_distances = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
static PyMethodDef __pyx_methods_21kernel_prob_reshaping_KernelReshaper[] = {
{"reshape_probs", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_21kernel_prob_reshaping_14KernelReshaper_3reshape_probs, METH_VARARGS|METH_KEYWORDS, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw_21kernel_prob_reshaping_14KernelReshaper_5__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw_21kernel_prob_reshaping_14KernelReshaper_7__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static PyTypeObject __pyx_type_21kernel_prob_reshaping_KernelReshaper = {
PyVarObject_HEAD_INIT(0, 0)
"kernel_prob_reshaping.KernelReshaper", /*tp_name*/
sizeof(struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_21kernel_prob_reshaping_KernelReshaper, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_21kernel_prob_reshaping_KernelReshaper, /*tp_traverse*/
__pyx_tp_clear_21kernel_prob_reshaping_KernelReshaper, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_21kernel_prob_reshaping_KernelReshaper, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_pw_21kernel_prob_reshaping_14KernelReshaper_1__init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_21kernel_prob_reshaping_KernelReshaper, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
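/* Type scaffolding for the cython.view.array class: tp_new chains into
 * __pyx_array___cinit__, the sq_item shim forwards integer indexing to
 * mp_subscript, and tp_getattro falls back to the class-level __getattr__
 * when generic attribute lookup raises AttributeError. */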
static struct __pyx_vtabstruct_array __pyx_vtable_array;
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_array_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_array_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_array;
p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_array(PyObject *o) {
struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_array___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->mode);
Py_CLEAR(p->_format);
(*Py_TYPE(o)->tp_free)(o);
}
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_array___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n);
if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
v = __pyx_array___getattr__(o, n);
}
return v;
}
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o);
}
static PyMethodDef __pyx_methods_array[] = {
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_array[] = {
{(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_array = {
__pyx_array___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_array, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_array = {
__pyx_array___len__, /*mp_length*/
__pyx_array___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_array = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_array_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_array = {
PyVarObject_HEAD_INIT(0, 0)
"kernel_prob_reshaping.array", /*tp_name*/
sizeof(struct __pyx_array_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_array, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_array, /*tp_as_sequence*/
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
__pyx_tp_getattro_array, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_array, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_array, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_array, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_array, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
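/* Type scaffolding for the memoryview Enum sentinel type. Each instance
 * holds a single `name` object and participates in GC via traverse/clear;
 * these sentinels appear to serve as the contiguity/access-mode markers
 * ("strided and direct", "contiguous and indirect", ...) named in the
 * string table further below. */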
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
struct __pyx_MemviewEnum_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_MemviewEnum_obj *)o);
p->name = Py_None; Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_Enum(PyObject *o) {
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->name);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
if (p->name) {
e = (*v)(p->name, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_Enum(PyObject *o) {
PyObject* tmp;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
tmp = ((PyObject*)p->name);
p->name = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
static PyMethodDef __pyx_methods_Enum[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
PyVarObject_HEAD_INIT(0, 0)
"kernel_prob_reshaping.Enum", /*tp_name*/
sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_Enum, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_MemviewEnum___repr__, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_Enum, /*tp_traverse*/
__pyx_tp_clear_Enum, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_Enum, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_MemviewEnum___init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_Enum, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
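/* Type scaffolding for the memoryview class itself: tp_new chains into
 * __cinit__; tp_dealloc saves and restores any pending exception and
 * temporarily bumps the refcount while the user-level __dealloc__ runs;
 * the getset table exposes T, base, shape, strides, suboffsets, ndim,
 * itemsize, nbytes and size as read-only properties. */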
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryview_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryview_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_memoryview;
p->obj = Py_None; Py_INCREF(Py_None);
p->_size = Py_None; Py_INCREF(Py_None);
p->_array_interface = Py_None; Py_INCREF(Py_None);
p->view.obj = NULL;
if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_memoryview___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->obj);
Py_CLEAR(p->_size);
Py_CLEAR(p->_array_interface);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
if (p->obj) {
e = (*v)(p->obj, a); if (e) return e;
}
if (p->_size) {
e = (*v)(p->_size, a); if (e) return e;
}
if (p->_array_interface) {
e = (*v)(p->_array_interface, a); if (e) return e;
}
if (p->view.obj) {
e = (*v)(p->view.obj, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_memoryview(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
tmp = ((PyObject*)p->obj);
p->obj = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_size);
p->_size = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_array_interface);
p->_array_interface = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
Py_CLEAR(p->view.obj);
return 0;
}
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_memoryview___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o);
}
static PyMethodDef __pyx_methods_memoryview[] = {
{"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
{"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
{"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
{"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
{(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0},
{(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0},
{(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0},
{(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0},
{(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0},
{(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0},
{(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0},
{(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0},
{(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
__pyx_memoryview___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_memoryview, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
__pyx_memoryview___len__, /*mp_length*/
__pyx_memoryview___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_memoryview_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_memoryview = {
PyVarObject_HEAD_INIT(0, 0)
"kernel_prob_reshaping.memoryview", /*tp_name*/
sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_memoryview, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_memoryview___repr__, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
&__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
__pyx_memoryview___str__, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_memoryview, /*tp_traverse*/
__pyx_tp_clear_memoryview, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_memoryview, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_memoryview, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_memoryview, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
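/* _memoryviewslice extends memoryview: tp_new first runs the base tp_new,
 * then installs the subclass vtable and the from_object/from_slice fields;
 * dealloc, traverse and clear each chain to the memoryview base versions,
 * with tp_clear additionally releasing the held from_slice reference. */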
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryviewslice_obj *p;
PyObject *o = __pyx_tp_new_memoryview(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryviewslice_obj *)o);
p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
p->from_object = Py_None; Py_INCREF(Py_None);
p->from_slice.memview = NULL;
return o;
}
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_memoryviewslice___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->from_object);
PyObject_GC_Track(o);
__pyx_tp_dealloc_memoryview(o);
}
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e;
if (p->from_object) {
e = (*v)(p->from_object, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
__pyx_tp_clear_memoryview(o);
tmp = ((PyObject*)p->from_object);
p->from_object = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
__PYX_XDEC_MEMVIEW(&p->from_slice, 1);
return 0;
}
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o);
}
static PyMethodDef __pyx_methods__memoryviewslice[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
{(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
PyVarObject_HEAD_INIT(0, 0)
"kernel_prob_reshaping._memoryviewslice", /*tp_name*/
sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___repr__, /*tp_repr*/
#else
0, /*tp_repr*/
#endif
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___str__, /*tp_str*/
#else
0, /*tp_str*/
#endif
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
"Internal class for passing memoryview slices to Python", /*tp_doc*/
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
__pyx_tp_clear__memoryviewslice, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods__memoryviewslice, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets__memoryviewslice, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new__memoryviewslice, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
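/* Module bookkeeping: an empty module-level method table plus the
 * PyModuleDef for "kernel_prob_reshaping". Under
 * CYTHON_PEP489_MULTI_PHASE_INIT the module is created and executed
 * through the Py_mod_create / Py_mod_exec slots (PEP 489 multi-phase
 * initialisation); otherwise a classic single-phase PyModuleDef with
 * m_size == -1 is used. */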
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_kernel_prob_reshaping(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec_kernel_prob_reshaping},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"kernel_prob_reshaping",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
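/* Interned-string table. Each entry appears to follow Cython's
 * __Pyx_StringTabEntry layout: {target slot, C literal, byte length,
 * encoding, is_unicode, is_str, intern}; the name prefixes encode the
 * kind (__pyx_n_s_* interned identifier strings, __pyx_kp_s_* plain
 * string constants, __pyx_n_u_* unicode names, __pyx_n_b_* bytes). */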
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
{&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
{&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
{&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
{&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
{&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1},
{&__pyx_kp_s_Incompatible_checksums_s_vs_0x9c, __pyx_k_Incompatible_checksums_s_vs_0x9c, sizeof(__pyx_k_Incompatible_checksums_s_vs_0x9c), 0, 0, 1, 0},
{&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0},
{&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
{&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
{&__pyx_n_s_KernelReshaper, __pyx_k_KernelReshaper, sizeof(__pyx_k_KernelReshaper), 0, 0, 1, 1},
{&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
{&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
{&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
{&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
{&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
{&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
{&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1},
{&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
{&__pyx_n_s_array, __pyx_k_array, sizeof(__pyx_k_array), 0, 0, 1, 1},
{&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
{&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
{&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
{&__pyx_n_s_cat_probs, __pyx_k_cat_probs, sizeof(__pyx_k_cat_probs), 0, 0, 1, 1},
{&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_descriptors, __pyx_k_descriptors, sizeof(__pyx_k_descriptors), 0, 0, 1, 1},
{&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
{&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
{&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
{&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
{&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
{&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
{&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
{&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
{&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
{&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
{&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
{&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
{&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
{&__pyx_n_s_kernel_prob_reshaping, __pyx_k_kernel_prob_reshaping, sizeof(__pyx_k_kernel_prob_reshaping), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
{&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
{&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
{&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
{&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0},
{&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
{&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
{&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0},
{&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0},
{&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
{&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
{&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
{&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
{&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
{&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
{&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
{&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_unpickle_KernelReshaper, __pyx_k_pyx_unpickle_KernelReshaper, sizeof(__pyx_k_pyx_unpickle_KernelReshaper), 0, 0, 1, 1},
{&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
{&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
{&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
{&__pyx_n_s_reshape_probs, __pyx_k_reshape_probs, sizeof(__pyx_k_reshape_probs), 0, 0, 1, 1},
{&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
{&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
{&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
{&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
{&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
{&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
{&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
{&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
{&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
{&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
{&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
{&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
{&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
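/* Each __Pyx_StringTabEntry row above is (target slot, C bytes, size,
   encoding, is_unicode, is_str, intern); e.g. the __pyx_n_s_* names are
   interned str objects and __pyx_kp_s_* are plain string constants.
   __Pyx_InitStrings() walks this table once at import time and fills each
   slot with a ready-made Python string, so later code reuses objects like
   __pyx_n_s_range instead of rebuilding them.  A minimal sketch of that
   loop (the real helper lives in Cython's shared utility code and also
   covers Python 2 and explicit encodings):

       static int init_strings_sketch(__Pyx_StringTabEntry *t) {
           while (t->p) {
               if (t->is_unicode)
                   *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
               else if (t->intern)
                   *t->p = PyUnicode_InternFromString(t->s);
               else
                   *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
               if (!*t->p) return -1;
               ++t;
           }
           return 0;
       }
*/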
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 39, __pyx_L1_error)
__pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(2, 884, __pyx_L1_error)
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error)
__pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error)
__pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error)
__pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error)
__pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error)
__pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error)
__pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
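/* Caching builtins up front turns every later use of range, enumerate,
   TypeError, etc. into a plain pointer read instead of a lookup in
   __builtins__ on each use.  The (file index, line) pairs passed to
   __PYX_ERR point back at the .pyx/.pxd source line where each builtin
   is first needed. */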
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":884
* __pyx_import_array()
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_umath() except -1:
*/
__pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple_)) __PYX_ERR(2, 884, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple_);
__Pyx_GIVEREF(__pyx_tuple_);
/* "../../../venv/lib/python3.8/site-packages/numpy/__init__.pxd":890
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_ufunc() except -1:
*/
__pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(2, 890, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__3);
__Pyx_GIVEREF(__pyx_tuple__3);
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "View.MemoryView":418
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 495, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
/* "View.MemoryView":570
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_GIVEREF(__pyx_tuple__13);
/* "View.MemoryView":577
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__pyx_tuple__14 = PyTuple_New(1); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__14);
__Pyx_INCREF(__pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_int_neg_1);
PyTuple_SET_ITEM(__pyx_tuple__14, 0, __pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_tuple__14);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__15);
__Pyx_GIVEREF(__pyx_tuple__15);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__16);
__Pyx_GIVEREF(__pyx_tuple__16);
/* "View.MemoryView":682
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_slice__17 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__17)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__17);
__Pyx_GIVEREF(__pyx_slice__17);
/* "View.MemoryView":703
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 703, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__18);
__Pyx_GIVEREF(__pyx_tuple__18);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__19);
__Pyx_GIVEREF(__pyx_tuple__19);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__20);
__Pyx_GIVEREF(__pyx_tuple__20);
/* "(tree fragment)":1
* def __pyx_unpickle_KernelReshaper(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_tuple__21 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__21);
__Pyx_GIVEREF(__pyx_tuple__21);
__pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_KernelReshaper, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) __PYX_ERR(1, 1, __pyx_L1_error)
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__23);
__Pyx_GIVEREF(__pyx_tuple__23);
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__24);
__Pyx_GIVEREF(__pyx_tuple__24);
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__25);
__Pyx_GIVEREF(__pyx_tuple__25);
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_tuple__26 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__26);
__Pyx_GIVEREF(__pyx_tuple__26);
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__27 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(1, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__27);
__Pyx_GIVEREF(__pyx_tuple__27);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_tuple__28 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__28);
__Pyx_GIVEREF(__pyx_tuple__28);
__pyx_codeobj__29 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__28, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__29)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
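/* The tuples built above are the constant argument packs for the raise
   sites in the module, e.g. __pyx_tuple__3 is
   ("Empty shape tuple for cython.array",).  Packing them once here means
   each raise only calls the exception type with a shared, pre-built
   tuple instead of re-packing its arguments on every failure. */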
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_163952500 = PyInt_FromLong(163952500L); if (unlikely(!__pyx_int_163952500)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
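/* Besides the small-int cache, the two large constants are layout
   checksums, not data: 163952500 == 0x9C5B774 and 184977713 == 0xB068931
   are compared against the __pyx_checksum argument inside
   __pyx_unpickle_KernelReshaper and __pyx_unpickle_Enum before a pickled
   state tuple is trusted. */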
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__pyx_vtabptr_21kernel_prob_reshaping_KernelReshaper = &__pyx_vtable_21kernel_prob_reshaping_KernelReshaper;
__pyx_vtable_21kernel_prob_reshaping_KernelReshaper._reshape_probs = (__Pyx_memviewslice (*)(struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *, __Pyx_memviewslice, __Pyx_memviewslice))__pyx_f_21kernel_prob_reshaping_14KernelReshaper__reshape_probs;
__pyx_vtable_21kernel_prob_reshaping_KernelReshaper.reshape_probs = (PyObject *(*)(struct __pyx_obj_21kernel_prob_reshaping_KernelReshaper *, PyArrayObject *, PyArrayObject *, int __pyx_skip_dispatch))__pyx_f_21kernel_prob_reshaping_14KernelReshaper_reshape_probs;
if (PyType_Ready(&__pyx_type_21kernel_prob_reshaping_KernelReshaper) < 0) __PYX_ERR(0, 14, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type_21kernel_prob_reshaping_KernelReshaper.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_21kernel_prob_reshaping_KernelReshaper.tp_dictoffset && __pyx_type_21kernel_prob_reshaping_KernelReshaper.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type_21kernel_prob_reshaping_KernelReshaper.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type_21kernel_prob_reshaping_KernelReshaper.tp_dict, __pyx_vtabptr_21kernel_prob_reshaping_KernelReshaper) < 0) __PYX_ERR(0, 14, __pyx_L1_error)
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_KernelReshaper, (PyObject *)&__pyx_type_21kernel_prob_reshaping_KernelReshaper) < 0) __PYX_ERR(0, 14, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type_21kernel_prob_reshaping_KernelReshaper) < 0) __PYX_ERR(0, 14, __pyx_L1_error)
__pyx_ptype_21kernel_prob_reshaping_KernelReshaper = &__pyx_type_21kernel_prob_reshaping_KernelReshaper;
__pyx_vtabptr_array = &__pyx_vtable_array;
__pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_array.tp_print = 0;
#endif
if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_MemviewEnum.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryview.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryviewslice.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type",
#if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
sizeof(PyTypeObject),
#else
sizeof(PyHeapTypeObject),
#endif
__Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(3, 9, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 199, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(2, 199, __pyx_L1_error)
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(2, 222, __pyx_L1_error)
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(2, 226, __pyx_L1_error)
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(2, 238, __pyx_L1_error)
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(2, 764, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
#ifndef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#elif PY_MAJOR_VERSION < 3
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
#define __Pyx_PyMODINIT_FUNC void
#endif
#else
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
#endif
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initkernel_prob_reshaping(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initkernel_prob_reshaping(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_kernel_prob_reshaping(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_kernel_prob_reshaping(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
if (allow_none || value != Py_None) {
result = PyDict_SetItemString(moddict, to_name, value);
}
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
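/* Under CYTHON_PEP489_MULTI_PHASE_INIT the module is created in two
   phases (PEP 489): PyInit_kernel_prob_reshaping() only returns the
   module definition, and the interpreter then drives its slots, roughly:

       static PyModuleDef_Slot slots_sketch[] = {
           {Py_mod_create, (void*)__pyx_pymod_create},
           {Py_mod_exec,   (void*)__pyx_pymod_exec_kernel_prob_reshaping},
           {0, NULL}
       };

   __pyx_pymod_create() above builds the module object from the import
   spec (copying __loader__, __file__, __package__ and __path__ across),
   and the exec function below then runs the actual module body. */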
static CYTHON_SMALL_CODE int __pyx_pymod_exec_kernel_prob_reshaping(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
static PyThread_type_lock __pyx_t_2[8];
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module 'kernel_prob_reshaping' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_kernel_prob_reshaping(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("kernel_prob_reshaping", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_kernel_prob_reshaping) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "kernel_prob_reshaping")) {
if (unlikely(PyDict_SetItemString(modules, "kernel_prob_reshaping", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "kernel_prob_reshaping.pyx":7
* from cython.parallel import prange
*
* import numpy as np # <<<<<<<<<<<<<<
* cimport numpy as np
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 7, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":1
* def __pyx_unpickle_KernelReshaper(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_21kernel_prob_reshaping_1__pyx_unpickle_KernelReshaper, NULL, __pyx_n_s_kernel_prob_reshaping); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_KernelReshaper, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "kernel_prob_reshaping.pyx":1
* #!/usr/bin/env python # <<<<<<<<<<<<<<
*
* import cython
*/
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":209
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
* def __dealloc__(array self):
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__26, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__27, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":316
*
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<<
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [
* PyThread_allocate_lock(),
*/
__pyx_memoryview_thread_locks_used = 0;
/* "View.MemoryView":317
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<<
* PyThread_allocate_lock(),
* PyThread_allocate_lock(),
*/
__pyx_t_2[0] = PyThread_allocate_lock();
__pyx_t_2[1] = PyThread_allocate_lock();
__pyx_t_2[2] = PyThread_allocate_lock();
__pyx_t_2[3] = PyThread_allocate_lock();
__pyx_t_2[4] = PyThread_allocate_lock();
__pyx_t_2[5] = PyThread_allocate_lock();
__pyx_t_2[6] = PyThread_allocate_lock();
__pyx_t_2[7] = PyThread_allocate_lock();
memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8));
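/* Eight locks are preallocated into __pyx_memoryview_thread_locks so the
   first THREAD_LOCKS_PREALLOCATED memoryview objects can grab a lock from
   this pool; only when the pool is exhausted does the runtime fall back
   to calling PyThread_allocate_lock() per object. */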
/* "View.MemoryView":549
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":995
* return self.from_object
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init kernel_prob_reshaping", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init kernel_prob_reshaping");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule(modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, "RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* KeywordStringCheck */
static int __Pyx_CheckKeywordStrings(
PyObject *kwdict,
const char* function_name,
int kw_allowed)
{
PyObject* key = 0;
Py_ssize_t pos = 0;
#if CYTHON_COMPILING_IN_PYPY
if (!kw_allowed && PyDict_Next(kwdict, &pos, &key, 0))
goto invalid_keyword;
return 1;
#else
while (PyDict_Next(kwdict, &pos, &key, 0)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyString_Check(key)))
#endif
if (unlikely(!PyUnicode_Check(key)))
goto invalid_keyword_type;
}
if ((!kw_allowed) && unlikely(key))
goto invalid_keyword;
return 1;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
return 0;
#endif
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
return 0;
}
/* MemviewSliceInit */
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference)
{
__Pyx_RefNannyDeclarations
int i, retval=-1;
Py_buffer *buf = &memview->view;
__Pyx_RefNannySetupContext("init_memviewslice", 0);
if (unlikely(memviewslice->memview || memviewslice->data)) {
PyErr_SetString(PyExc_ValueError,
"memviewslice is already initialized!");
goto fail;
}
if (buf->strides) {
for (i = 0; i < ndim; i++) {
memviewslice->strides[i] = buf->strides[i];
}
} else {
Py_ssize_t stride = buf->itemsize;
for (i = ndim - 1; i >= 0; i--) {
memviewslice->strides[i] = stride;
stride *= buf->shape[i];
}
}
for (i = 0; i < ndim; i++) {
memviewslice->shape[i] = buf->shape[i];
if (buf->suboffsets) {
memviewslice->suboffsets[i] = buf->suboffsets[i];
} else {
memviewslice->suboffsets[i] = -1;
}
}
memviewslice->memview = memview;
memviewslice->data = (char *)buf->buf;
if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
Py_INCREF(memview);
}
retval = 0;
goto no_fail;
fail:
memviewslice->memview = 0;
memviewslice->data = 0;
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
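/* When the exporter supplies no strides (buf->strides == NULL) the loop
   above reconstructs C-contiguous strides from the innermost dimension
   outwards.  Worked example for shape (3, 4) with itemsize 8:

       strides[1] = 8           (one item)
       strides[0] = 8 * 4 = 32  (one full row)

   so element (i, j) lives at data + 32*i + 8*j. */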
#ifndef Py_NO_RETURN
#define Py_NO_RETURN
#endif
static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN {
va_list vargs;
char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, fmt);
#else
va_start(vargs);
#endif
vsnprintf(msg, 200, fmt, vargs);
va_end(vargs);
Py_FatalError(msg);
}
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)++;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)--;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
int first_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (unlikely(!memview || (PyObject *) memview == Py_None))
return;
if (unlikely(__pyx_get_slice_count(memview) < 0))
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
first_time = __pyx_add_acquisition_count(memview) == 0;
if (unlikely(first_time)) {
if (have_gil) {
Py_INCREF((PyObject *) memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_INCREF((PyObject *) memview);
PyGILState_Release(_gilstate);
}
}
}
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
int have_gil, int lineno) {
int last_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (unlikely(!memview || (PyObject *) memview == Py_None)) {
memslice->memview = NULL;
return;
}
if (unlikely(__pyx_get_slice_count(memview) <= 0))
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
last_time = __pyx_sub_acquisition_count(memview) == 1;
memslice->data = NULL;
if (unlikely(last_time)) {
if (have_gil) {
Py_CLEAR(memslice->memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_CLEAR(memslice->memview);
PyGILState_Release(_gilstate);
}
} else {
memslice->memview = NULL;
}
}
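/* __Pyx_INC_MEMVIEW / __Pyx_XDEC_MEMVIEW implement the slice
   acquisition-count protocol: the first slice taken from a memoryview
   adds one real Py_INCREF (so the object outlives all of its slices,
   including ones held without the GIL), later acquisitions only bump the
   atomic counter, and the last release drops that single reference,
   re-acquiring the GIL first if necessary. */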
/* PyDictVersioning */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
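/* CPython 3.6+ stamps every dict with a version tag (PEP 509) that
   changes on any mutation.  The helpers above let generated code cache a
   looked-up value together with the dict versions it was found under and
   skip the dict lookup entirely while the tags still match. */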
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
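/* The frame is released with the recursion depth temporarily bumped:
   Py_DECREF(f) may run destructors that re-enter the interpreter, and
   this C stack frame is still live even though EvalFrameEx has already
   returned its recursion slot. */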
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
/* function called with no arguments, but all parameters have
a default value: use the default values as the arguments. */
args = &PyTuple_GET_ITEM(argdefs, 0);
result = __Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif
#endif
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
int flags = PyCFunction_GET_FLAGS(func);
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
/* _PyCFunction_FastCallDict() must not be called with an exception set,
because it may clear it (directly or indirectly) and so the
caller loses its exception */
assert(!PyErr_Occurred());
if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
} else {
return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
}
}
#endif
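/* METH_FASTCALL hands arguments over as a C array plus a count instead
   of an argument tuple.  The casts above distinguish the CPython 3.6
   flavour (which still took a kwnames parameter even without
   METH_KEYWORDS) from the 3.7+ convention, where the extra kwnames
   parameter exists only when METH_KEYWORDS is set. */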
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* GetModuleGlobalName */
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
} else if (unlikely(PyErr_Occurred())) {
return NULL;
}
#else
result = PyDict_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
#endif
#else
result = PyObject_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
PyErr_Clear();
#endif
return __Pyx_GetBuiltinName(name);
}
/* PyObjectCall2Args */
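/* Calls function(arg1, arg2). The fast paths reuse the array-based calling
conventions above; only the generic fallback materializes a 2-tuple. The
inner `PyObject *args[2]` arrays intentionally shadow the outer tuple
variable of the same name. */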
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
PyObject *args, *result = NULL;
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyFunction_FastCall(function, args, 2);
}
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyCFunction_FastCall(function, args, 2);
}
#endif
args = PyTuple_New(2);
if (unlikely(!args)) goto done;
Py_INCREF(arg1);
PyTuple_SET_ITEM(args, 0, arg1);
Py_INCREF(arg2);
PyTuple_SET_ITEM(args, 1, arg2);
Py_INCREF(function);
result = __Pyx_PyObject_Call(function, args, NULL);
Py_DECREF(args);
Py_DECREF(function);
done:
return result;
}
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
if (likely(PyCFunction_Check(func))) {
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (__Pyx_PyFastCFunction_Check(func)) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(__Pyx_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
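/* Distributes a call's keyword dict into the slots for declared argument
names and an optional **kwargs dict (kwds2). Names are matched first by
pointer identity (keyword strings are normally interned) and only then by
value, with a length pre-check where the unicode internals allow it.
Unknown keywords go to kwds2 or raise TypeError; a keyword that was
already bound positionally raises via __Pyx_RaiseDoubleKeywordsError. */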
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* ArgTypeTest */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
else if (exact) {
#if PY_MAJOR_VERSION == 2
if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(__Pyx_TypeCheck(obj, type))) return 1;
}
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
return 0;
}
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
if (unlikely(PyTuple_Check(err)))
return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* GetAttr */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_USE_TYPE_SLOTS
#if PY_MAJOR_VERSION >= 3
if (likely(PyUnicode_Check(n)))
#else
if (likely(PyString_Check(n)))
#endif
return __Pyx_PyObject_GetAttrStr(o, n);
#endif
return PyObject_GetAttr(o, n);
}
/* GetAttr3 */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
return NULL;
__Pyx_PyErr_Clear();
Py_INCREF(d);
return d;
}
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
PyObject *r = __Pyx_GetAttr(o, n);
return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
}
/* Import */
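/* Implements `import name` / `from name import ...` at the requested
relative-import level. On Python 3, level == -1 (Python 2's implicit
relative semantics) is emulated by first trying a level-1 import when the
importing module lives in a package, then retrying as an absolute
import. */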
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* ImportFrom */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
"cannot import name %.230s", PyString_AS_STRING(name));
#else
"cannot import name %S", name);
#endif
}
return value;
}
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* GetItemInt */
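/* Specialized integer indexing: exact lists and tuples are read straight
from their item arrays (with optional wraparound and bounds checks),
other sequences go through the sq_item slot, and anything else falls back
to PyObject_GetItem with a boxed index. */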
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyList_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyTuple_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* HasAttr */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
PyObject *r;
if (unlikely(!__Pyx_PyBaseString_Check(n))) {
PyErr_SetString(PyExc_TypeError,
"hasattr(): attribute name must be string");
return -1;
}
r = __Pyx_GetAttr(o, n);
if (unlikely(!r)) {
PyErr_Clear();
return 0;
} else {
Py_DECREF(r);
return 1;
}
}
/* GetTopmostException */
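/* Walks the _PyErr_StackItem chain (the exc_info stack of CPython >= 3.7)
to find the innermost entry that actually holds an exception, skipping
exhausted items left behind by finished except blocks. */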
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
_PyErr_StackItem *exc_info = tstate->exc_info;
while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
exc_info->previous_item != NULL)
{
exc_info = exc_info->previous_item;
}
return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
*type = exc_info->exc_type;
*value = exc_info->exc_value;
*tb = exc_info->exc_traceback;
#else
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
#endif
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = type;
exc_info->exc_value = value;
exc_info->exc_traceback = tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
/* GetException */
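/* Fetches and normalizes the current exception, makes it the "handled"
exception (exc_info), and returns new references through the out
parameters; this is the work an `except` block performs on entry. */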
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if CYTHON_USE_EXC_INFO_STACK
{
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = local_type;
exc_info->exc_value = local_value;
exc_info->exc_traceback = local_tb;
}
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
/* BytesEquals */
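/* Bytes equality with cheap rejects before memcmp: pointer identity,
length, first byte, and (when available) the cached ob_shash values.
Only Py_EQ and Py_NE are handled here; non-bytes operands fall back to
rich comparison. */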
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
if (s1 == s2) {
return (equals == Py_EQ);
} else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
const char *ps1, *ps2;
Py_ssize_t length = PyBytes_GET_SIZE(s1);
if (length != PyBytes_GET_SIZE(s2))
return (equals == Py_NE);
ps1 = PyBytes_AS_STRING(s1);
ps2 = PyBytes_AS_STRING(s2);
if (ps1[0] != ps2[0]) {
return (equals == Py_NE);
} else if (length == 1) {
return (equals == Py_EQ);
} else {
int result;
#if CYTHON_USE_UNICODE_INTERNALS
Py_hash_t hash1, hash2;
hash1 = ((PyBytesObject*)s1)->ob_shash;
hash2 = ((PyBytesObject*)s2)->ob_shash;
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
return (equals == Py_NE);
}
#endif
result = memcmp(ps1, ps2, (size_t)length);
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
return (equals == Py_NE);
} else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
return (equals == Py_NE);
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
#endif
}
/* UnicodeEquals */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
PyObject* owned_ref = NULL;
#endif
int s1_is_unicode, s2_is_unicode;
if (s1 == s2) {
goto return_eq;
}
s1_is_unicode = PyUnicode_CheckExact(s1);
s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
owned_ref = PyUnicode_FromObject(s2);
if (unlikely(!owned_ref))
return -1;
s2 = owned_ref;
s2_is_unicode = 1;
} else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
owned_ref = PyUnicode_FromObject(s1);
if (unlikely(!owned_ref))
return -1;
s1 = owned_ref;
s1_is_unicode = 1;
} else if (((!s2_is_unicode) & (!s1_is_unicode))) {
return __Pyx_PyBytes_Equals(s1, s2, equals);
}
#endif
if (s1_is_unicode & s2_is_unicode) {
Py_ssize_t length;
int kind;
void *data1, *data2;
if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
return -1;
length = __Pyx_PyUnicode_GET_LENGTH(s1);
if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
goto return_ne;
}
#if CYTHON_USE_UNICODE_INTERNALS
{
Py_hash_t hash1, hash2;
#if CYTHON_PEP393_ENABLED
hash1 = ((PyASCIIObject*)s1)->hash;
hash2 = ((PyASCIIObject*)s2)->hash;
#else
hash1 = ((PyUnicodeObject*)s1)->hash;
hash2 = ((PyUnicodeObject*)s2)->hash;
#endif
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
goto return_ne;
}
}
#endif
kind = __Pyx_PyUnicode_KIND(s1);
if (kind != __Pyx_PyUnicode_KIND(s2)) {
goto return_ne;
}
data1 = __Pyx_PyUnicode_DATA(s1);
data2 = __Pyx_PyUnicode_DATA(s2);
if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
goto return_ne;
} else if (length == 1) {
goto return_eq;
} else {
int result = memcmp(data1, data2, (size_t)(length * kind));
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & s2_is_unicode) {
goto return_ne;
} else if ((s2 == Py_None) & s1_is_unicode) {
goto return_ne;
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
return_eq:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ);
return_ne:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_NE);
#endif
}
/* None: Py_ssize_t floor division */
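/* C integer division truncates toward zero while Python floor-divides, so
the quotient is decremented exactly when the remainder is nonzero and has
the opposite sign of the divisor; `(r ^ b) < 0` tests that sign
difference without branching. */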
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
Py_ssize_t q = a / b;
Py_ssize_t r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
/* ObjectGetItem */
#if CYTHON_USE_TYPE_SLOTS
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
PyObject *runerr;
Py_ssize_t key_value;
PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
if (unlikely(!(m && m->sq_item))) {
PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
return NULL;
}
key_value = __Pyx_PyIndex_AsSsize_t(index);
if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
}
if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
PyErr_Clear();
PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
}
return NULL;
}
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
if (likely(m && m->mp_subscript)) {
return m->mp_subscript(obj, key);
}
return __Pyx_PyObject_GetIndex(obj, key);
}
#endif
/* decode_c_string */
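/* Decodes cstring[start:stop] with Python slice semantics: negative bounds
are taken relative to strlen(cstring), an empty slice yields the shared
empty unicode object, and decode_func (e.g. PyUnicode_DecodeUTF8, when
the encoding is known at compile time) is preferred over the generic
PyUnicode_Decode. */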
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
Py_ssize_t length;
if (unlikely((start < 0) | (stop < 0))) {
size_t slen = strlen(cstring);
if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
PyErr_SetString(PyExc_OverflowError,
"c-string too long to convert to Python");
return NULL;
}
length = (Py_ssize_t) slen;
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
}
if (unlikely(stop <= start))
return __Pyx_NewRef(__pyx_empty_unicode);
length = stop - start;
cstring += start;
if (decode_func) {
return decode_func(cstring, length, errors);
} else {
return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
/* RaiseTooManyValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* SwapException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = *type;
exc_info->exc_value = *value;
exc_info->exc_traceback = *tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = *type;
tstate->exc_value = *value;
tstate->exc_traceback = *tb;
#endif
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
PyErr_SetExcInfo(*type, *value, *tb);
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#endif
/* FastTypeChecks */
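/* Exception matching without the generic subclass protocol: on Python 3,
subclass checks scan tp_mro directly (or walk tp_base while the MRO is
not yet initialized) instead of calling PyObject_IsSubclass; on Python 2
the generic check is kept, with errors demoted to
PyErr_WriteUnraisable. */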
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
} else {
}
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
} else {
}
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* PyIntBinop */
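/* op1 + op2 where op2 is a compile-time integer constant. The Py2 int path
adds in unsigned arithmetic (well-defined on wraparound) and detects
signed overflow with the sign test `(x^a) < 0 && (x^b) < 0`: the sum can
only have overflowed if its sign differs from both operands. The PyLong
path unpacks small multi-digit longs (up to 4 digits) into a native long
or long long before adding; everything else defers to PyNumber_Add. */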
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
(void)inplace;
(void)zerodivision_check;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(op1))) {
const long b = intval;
long x;
long a = PyInt_AS_LONG(op1);
x = (long)((unsigned long)a + b);
if (likely((x^a) >= 0 || (x^b) >= 0))
return PyInt_FromLong(x);
return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(op1))) {
const long b = intval;
long a, x;
#ifdef HAVE_LONG_LONG
const PY_LONG_LONG llb = intval;
PY_LONG_LONG lla, llx;
#endif
const digit* digits = ((PyLongObject*)op1)->ob_digit;
const Py_ssize_t size = Py_SIZE(op1);
if (likely(__Pyx_sst_abs(size) <= 1)) {
a = likely(size) ? digits[0] : 0;
if (size == -1) a = -a;
} else {
switch (size) {
case -2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
}
x = a + b;
return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
long_long:
llx = lla + llb;
return PyLong_FromLongLong(llx);
#endif
}
#endif
if (PyFloat_CheckExact(op1)) {
const long b = intval;
double a = PyFloat_AS_DOUBLE(op1);
double result;
PyFPE_START_PROTECT("add", return NULL)
result = ((double)a) + (double)b;
PyFPE_END_PROTECT(result)
return PyFloat_FromDouble(result);
}
return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
/* None: raise UnboundLocalError */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
/* None: long floor division (same correction as __Pyx_div_Py_ssize_t above) */
static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
long q = a / b;
long r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
"'%.50s' object has no attribute '%U'",
tp->tp_name, attr_name);
#else
"'%.50s' object has no attribute '%.400s'",
tp->tp_name, PyString_AS_STRING(attr_name));
#endif
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
PyObject *descr;
PyTypeObject *tp = Py_TYPE(obj);
if (unlikely(!PyString_Check(attr_name))) {
return PyObject_GenericGetAttr(obj, attr_name);
}
assert(!tp->tp_dictoffset);
descr = _PyType_Lookup(tp, attr_name);
if (unlikely(!descr)) {
return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
}
Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
{
descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
if (unlikely(f)) {
PyObject *res = f(descr, obj, (PyObject *)tp);
Py_DECREF(descr);
return res;
}
}
return descr;
}
#endif
/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
return PyObject_GenericGetAttr(obj, attr_name);
}
return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif
/* SetVTable */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
if (!ob)
goto bad;
if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
goto bad;
Py_DECREF(ob);
return 0;
bad:
Py_XDECREF(ob);
return -1;
}
/* PyObjectGetAttrStrNoError */
static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
__Pyx_PyErr_Clear();
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
}
#endif
result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
if (unlikely(!result)) {
__Pyx_PyObject_GetAttrStr_ClearAttributeError();
}
return result;
}
/* SetupReduce */
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
int ret;
PyObject *name_attr;
name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2);
if (likely(name_attr)) {
ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
} else {
ret = -1;
}
if (unlikely(ret < 0)) {
PyErr_Clear();
ret = 0;
}
Py_XDECREF(name_attr);
return ret;
}
static int __Pyx_setup_reduce(PyObject* type_obj) {
int ret = 0;
PyObject *object_reduce = NULL;
PyObject *object_reduce_ex = NULL;
PyObject *reduce = NULL;
PyObject *reduce_ex = NULL;
PyObject *reduce_cython = NULL;
PyObject *setstate = NULL;
PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#else
if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#else
object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#endif
reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#else
object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#endif
reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython);
if (likely(reduce_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (reduce == object_reduce || PyErr_Occurred()) {
goto __PYX_BAD;
}
setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
if (!setstate) PyErr_Clear();
if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython);
if (likely(setstate_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (!setstate || PyErr_Occurred()) {
goto __PYX_BAD;
}
}
PyType_Modified((PyTypeObject*)type_obj);
}
}
goto __PYX_GOOD;
__PYX_BAD:
if (!PyErr_Occurred())
PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
ret = -1;
__PYX_GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
Py_XDECREF(object_reduce);
Py_XDECREF(object_reduce_ex);
#endif
Py_XDECREF(reduce);
Py_XDECREF(reduce_ex);
Py_XDECREF(reduce_cython);
Py_XDECREF(setstate);
Py_XDECREF(setstate_cython);
return ret;
}
/* TypeImport */
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name,
size_t size, enum __Pyx_ImportType_CheckSize check_size)
{
PyObject *result = 0;
char warning[200];
Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
PyObject *py_basicsize;
#endif
result = PyObject_GetAttrString(module, class_name);
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%.200s.%.200s is not a type object",
module_name, class_name);
goto bad;
}
#ifndef Py_LIMITED_API
basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
if (!py_basicsize)
goto bad;
basicsize = PyLong_AsSsize_t(py_basicsize);
Py_DECREF(py_basicsize);
py_basicsize = 0;
if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
goto bad;
#endif
if ((size_t)basicsize < size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
}
return (PyTypeObject *)result;
bad:
Py_XDECREF(result);
return NULL;
}
#endif
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!__pyx_cython_runtime)) {
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
use_cline, *cython_runtime_dict,
__Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
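/* Sorted line-to-code-object cache used by traceback generation; lookups
bisect on the line number. C lines are stored negated so they cannot
collide with Python line numbers (see __Pyx_AddTraceback below). */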
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
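/* Fabricates a code object and a frame whose filename, function name and
line number point at the Cython source (optionally annotated with the C
file and line), then lets PyTraceBack_Here() splice that frame into the
pending exception's traceback. */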
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if ((0)) {}
view->obj = NULL;
Py_DECREF(obj);
}
#endif
/* MemviewSliceIsContig */
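/* Checks C- or Fortran-contiguity by walking the dimensions in the order
implied by `order` ('F' walks first-to-last, anything else last-to-first)
and requiring that each stride equals the running itemsize product, with
no indirect (suboffset) dimensions. */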
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim)
{
int i, index, step, start;
Py_ssize_t itemsize = mvs.memview->view.itemsize;
if (order == 'F') {
step = 1;
start = 0;
} else {
step = -1;
start = ndim - 1;
}
for (i = 0; i < ndim; i++) {
index = start + step * i;
if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
return 0;
itemsize *= mvs.shape[index];
}
return 1;
}
/* OverlappingSlices */
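/* Computes the [start, end) byte range spanned by each slice (negative
strides extend the range downward from the data pointer) and reports an
overlap iff the two half-open intervals intersect, so slice assignments
can fall back to a temporary copy when source and destination alias. */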
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
void **out_start, void **out_end,
int ndim, size_t itemsize)
{
char *start, *end;
int i;
start = end = slice->data;
for (i = 0; i < ndim; i++) {
Py_ssize_t stride = slice->strides[i];
Py_ssize_t extent = slice->shape[i];
if (extent == 0) {
*out_start = *out_end = start;
return;
} else {
if (stride > 0)
end += stride * (extent - 1);
else
start += stride * (extent - 1);
}
}
*out_start = start;
*out_end = end + itemsize;
}
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize)
{
void *start1, *end1, *start2, *end2;
__pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
__pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
return (start1 < end2) && (start2 < end1);
}
/* Capsule */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
cobj = PyCapsule_New(p, sig, NULL);
#else
cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
return cobj;
}
/* CIntFromPyVerify */
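/* Helper macro for the C-int-from-Python converters: performs a conversion
returning `func_type` and verifies that the value survives a round trip
through the (possibly narrower) target type. The expansion site must
define `is_unsigned` and the raise_overflow / raise_neg_overflow
labels. */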
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
/* MemviewDtypeToObject */
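/* Boxing and unboxing for double-typed memoryview elements. Note the setter
returns 1 on success and 0 on failure, the inverse of CPython's usual
0/-1 convention. */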
static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp) {
return (PyObject *) PyFloat_FromDouble(*(double *) itemp);
}
static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj) {
double value = __pyx_PyFloat_AsDouble(obj);
if ((value == (double)-1) && PyErr_Occurred())
return 0;
*(double *) itemp = value;
return 1;
}
/* Declarations */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return ::std::complex< float >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return x + y*(__pyx_t_float_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
__pyx_t_float_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
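/* Complex arithmetic fallbacks for compilers without C99 _Complex (or C++
std::complex) support. Division uses Smith's algorithm: scaling by the
ratio of the smaller to the larger component of the divisor avoids the
overflow and underflow that the textbook (ac+bd)/(c*c+d*d) formula
suffers for very large or very tiny components; the disabled #else branch
keeps the textbook form for reference. */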
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if 1
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabsf(b.real) >= fabsf(b.imag)) {
if (b.real == 0 && b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
float r = b.imag / b.real;
float s = (float)(1.0) / (b.real + b.imag * r);
return __pyx_t_float_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
float r = b.real / b.imag;
float s = (float)(1.0) / (b.imag + b.real * r);
return __pyx_t_float_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
float denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_float_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrtf(z.real*z.real + z.imag*z.imag);
#else
return hypotf(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
float r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
float denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
return __Pyx_c_prod_float(a, a);
case 3:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, a);
case 4:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = powf(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
r = -a.real;
theta = atan2f(0.0, -1.0);
}
} else {
r = __Pyx_c_abs_float(a);
theta = atan2f(a.imag, a.real);
}
lnr = logf(r);
z_r = expf(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cosf(z_theta);
z.imag = z_r * sinf(z_theta);
return z;
}
#endif
#endif
/* Declarations */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return ::std::complex< double >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return x + y*(__pyx_t_double_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
__pyx_t_double_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if 1
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabs(b.real) >= fabs(b.imag)) {
if (b.real == 0 && b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
double r = b.imag / b.real;
double s = (double)(1.0) / (b.real + b.imag * r);
return __pyx_t_double_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
double r = b.real / b.imag;
double s = (double)(1.0) / (b.imag + b.real * r);
return __pyx_t_double_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
double denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_double_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt(z.real*z.real + z.imag*z.imag);
#else
return hypot(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
double denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
return __Pyx_c_prod_double(a, a);
case 3:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, a);
case 4:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = pow(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
r = -a.real;
theta = atan2(0.0, -1.0);
}
} else {
r = __Pyx_c_abs_double(a);
theta = atan2(a.imag, a.real);
}
lnr = log(r);
z_r = exp(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cos(z_theta);
z.imag = z_r * sin(z_theta);
return z;
}
#endif
#endif
/* MemviewSliceCopyTemplate */
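/* Allocates a new contiguous array with the shape of `from_mvs` and copies
   the slice contents into it; indirect (suboffset) dimensions cannot be
   copied this way and raise ValueError. */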
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object)
{
__Pyx_RefNannyDeclarations
int i;
__Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
Py_buffer *buf = &from_memview->view;
PyObject *shape_tuple = NULL;
PyObject *temp_int = NULL;
struct __pyx_array_obj *array_obj = NULL;
struct __pyx_memoryview_obj *memview_obj = NULL;
__Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
for (i = 0; i < ndim; i++) {
if (unlikely(from_mvs->suboffsets[i] >= 0)) {
PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
"indirect dimensions (axis %d)", i);
goto fail;
}
}
shape_tuple = PyTuple_New(ndim);
if (unlikely(!shape_tuple)) {
goto fail;
}
__Pyx_GOTREF(shape_tuple);
for(i = 0; i < ndim; i++) {
temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
if(unlikely(!temp_int)) {
goto fail;
} else {
PyTuple_SET_ITEM(shape_tuple, i, temp_int);
temp_int = NULL;
}
}
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
if (unlikely(!array_obj)) {
goto fail;
}
__Pyx_GOTREF(array_obj);
memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
(PyObject *) array_obj, contig_flag,
dtype_is_object,
from_mvs->memview->typeinfo);
if (unlikely(!memview_obj))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
goto fail;
if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
dtype_is_object) < 0))
goto fail;
goto no_fail;
fail:
__Pyx_XDECREF(new_mvs.memview);
new_mvs.memview = NULL;
new_mvs.data = NULL;
no_fail:
__Pyx_XDECREF(shape_tuple);
__Pyx_XDECREF(temp_int);
__Pyx_XDECREF(array_obj);
__Pyx_RefNannyFinishContext();
return new_mvs;
}
/* CIntToPy */
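/* Converts a C integer to a Python integer.  The sizeof() comparisons are
   compile-time constants, so the optimizer keeps exactly one branch: the
   smallest PyInt/PyLong constructor that represents the value exactly.
   The final fallback serializes the raw bytes via _PyLong_FromByteArray,
   probing endianness at run time with the `int one = 1` trick. */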
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* CIntFromPy */
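/* Converts a Python integer to a C integer with overflow checking.  When
   CYTHON_USE_PYLONG_INTERNALS is set, values of up to four PyLong digits
   are decoded directly from ob_digit to skip the generic API; otherwise
   the value goes through PyLong_AsLong(Long)/PyLong_AsUnsignedLong(Long),
   and arbitrary-precision values fall back to _PyLong_AsByteArray. */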
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* CIntFromPy */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const char neg_one = (char) -1, const_zero = (char) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(char) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (char) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0])
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) {
return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) {
return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) {
return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (char) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(char) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0])
case -2:
if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
}
#endif
if (sizeof(char) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
char val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (char) -1;
}
} else {
char val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (char) -1;
val = __Pyx_PyInt_As_char(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to char");
return (char) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to char");
return (char) -1;
}
/* IsLittleEndian */
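/* Type-puns a 32-bit constant through a union: on a little-endian machine
   the lowest-addressed byte holds the least-significant byte, so
   S.u8[0] == 4 when S.u32 == 0x01020304. */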
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
union {
uint32_t u32;
uint8_t u8[4];
} S;
S.u32 = 0x01020304;
return S.u8[0] == 4;
}
/* BufferFormatCheck */
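/* The routines below validate a PEP 3118 buffer format string against the
   __Pyx_TypeInfo description of the expected dtype.  The context keeps a
   stack of struct fields (ctx->head) so that nested struct specs ("T{...}")
   can be matched field by field against the expected layout. */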
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
    PyErr_Format(PyExc_ValueError,
                 "Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case '?': return "'bool'";
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
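/* Alignment probes: for each type T, sizeof(struct {char c; T x;}) minus
   sizeof(T) yields the padding inserted before x, i.e. the alignment
   requirement of T, without relying on a compiler-specific alignof. */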
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct. This will probably be the same as above,
   but we don't have any guarantees.
 */
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case '?': case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number, ndim;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ndim = ctx->head->field->type->ndim;
while (*ts && *ts != ')') {
switch (*ts) {
        case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue; /* skip whitespace; a bare `continue` would re-test the same character forever */
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
CYTHON_FALLTHROUGH;
case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) &&
(ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
CYTHON_FALLTHROUGH;
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
/* TypeInfoCompare */
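/* Structural equality of two dtype descriptions: size, type group,
   signedness, dimensions, and (for structs) every field recursively.
   'H' (bytes/char) entries compare by size alone, since their group may
   differ between declarations that are otherwise interchangeable. */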
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
int i;
if (!a || !b)
return 0;
if (a == b)
return 1;
if (a->size != b->size || a->typegroup != b->typegroup ||
a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
if (a->typegroup == 'H' || b->typegroup == 'H') {
return a->size == b->size;
} else {
return 0;
}
}
if (a->ndim) {
for (i = 0; i < a->ndim; i++)
if (a->arraysize[i] != b->arraysize[i])
return 0;
}
if (a->typegroup == 'S') {
if (a->flags != b->flags)
return 0;
if (a->fields || b->fields) {
if (!(a->fields && b->fields))
return 0;
for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
__Pyx_StructField *field_a = a->fields + i;
__Pyx_StructField *field_b = b->fields + i;
if (field_a->offset != field_b->offset ||
!__pyx_typeinfo_cmp(field_a->type, field_b->type))
return 0;
}
return !a->fields[i].type && !b->fields[i].type;
}
}
return 1;
}
/* MemviewSliceValidateAndInit */
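/* Per-dimension validation of an exporting buffer against the axis specs
   of a memoryview type: CONTIG demands an itemsize (or pointer-size)
   stride, FOLLOW only requires |stride| >= itemsize, DIRECT forbids and
   PTR requires a suboffset; overall C/F-contiguity is then verified
   separately in __pyx_verify_contig. */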
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
if (buf->shape[dim] <= 1)
return 1;
if (buf->strides) {
if (spec & __Pyx_MEMVIEW_CONTIG) {
if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
if (unlikely(buf->strides[dim] != sizeof(void *))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly contiguous "
"in dimension %d.", dim);
goto fail;
}
} else if (unlikely(buf->strides[dim] != buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_FOLLOW) {
Py_ssize_t stride = buf->strides[dim];
if (stride < 0)
stride = -stride;
if (unlikely(stride < buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
} else {
if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not contiguous in "
"dimension %d", dim);
goto fail;
} else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not indirect in "
"dimension %d", dim);
goto fail;
} else if (unlikely(buf->suboffsets)) {
PyErr_SetString(PyExc_ValueError,
"Buffer exposes suboffsets but no strides");
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
if (spec & __Pyx_MEMVIEW_DIRECT) {
if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) {
PyErr_Format(PyExc_ValueError,
"Buffer not compatible with direct access "
"in dimension %d.", dim);
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_PTR) {
if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly accessible "
"in dimension %d.", dim);
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
int i;
if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
Py_ssize_t stride = 1;
for (i = 0; i < ndim; i++) {
if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not fortran contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
} else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
Py_ssize_t stride = 1;
        for (i = ndim - 1; i > -1; i--) {
if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not C contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
}
return 1;
fail:
return 0;
}
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj)
{
struct __pyx_memoryview_obj *memview, *new_memview;
__Pyx_RefNannyDeclarations
Py_buffer *buf;
int i, spec = 0, retval = -1;
__Pyx_BufFmt_Context ctx;
int from_memoryview = __pyx_memoryview_check(original_obj);
__Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
original_obj)->typeinfo)) {
memview = (struct __pyx_memoryview_obj *) original_obj;
new_memview = NULL;
} else {
memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
original_obj, buf_flags, 0, dtype);
new_memview = memview;
if (unlikely(!memview))
goto fail;
}
buf = &memview->view;
if (unlikely(buf->ndim != ndim)) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
ndim, buf->ndim);
goto fail;
}
if (new_memview) {
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail;
}
if (unlikely((unsigned) buf->itemsize != dtype->size)) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
"does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
buf->itemsize,
(buf->itemsize > 1) ? "s" : "",
dtype->name,
dtype->size,
(dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->len > 0) {
for (i = 0; i < ndim; i++) {
spec = axes_specs[i];
if (unlikely(!__pyx_check_strides(buf, i, ndim, spec)))
goto fail;
if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec)))
goto fail;
}
if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)))
goto fail;
}
if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
new_memview != NULL) == -1)) {
goto fail;
}
retval = 0;
goto no_fail;
fail:
Py_XDECREF(new_memview);
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS_RO | writable_flag, 3,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS_RO | writable_flag, 2,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* CheckBinaryVersion */
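/* Compares the "X.Y" Python version the module was compiled against with
   the interpreter importing it; a mismatch emits a warning via
   PyErr_WarnEx rather than a hard error. */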
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* InitStrings */
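/* Interns/decodes the module's compile-time string constants.  t->n - 1
   drops the trailing NUL stored in the table; pre-hashing each object
   surfaces unhashable entries at import time rather than at first use. */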
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
int retval;
if (unlikely(!x)) return -1;
retval = __Pyx_PyObject_IsTrue(x);
Py_DECREF(x);
return retval;
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
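/* Illustrative note (not part of the generated code): CPython stores a long
   as base-2^PyLong_SHIFT digits in ob_digit, least significant digit first,
   with the sign carried by Py_SIZE. For Py_SIZE(b) == 2 the value is
       ((size_t)digits[1] << PyLong_SHIFT) | (size_t)digits[0];
   which is exactly the "case 2" branch above; negative sizes negate it. */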
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
_Atomic-2.c | /* PR c/65467 */
/* { dg-do compile } */
/* { dg-additional-options "-std=c11" } */
void
f1 (void)
{
_Atomic int i;
#pragma omp for /* { dg-error "'_Atomic' iteration variable 'i'" } */
for (i = 0; i < 64; i++)
;
#pragma omp parallel for /* { dg-error "'_Atomic' iteration variable 'i'" } */
for (i = 0; i < 64; i++)
;
#pragma omp simd /* { dg-error "'_Atomic' iteration variable 'i'" } */
for (i = 0; i < 64; i++)
;
#pragma omp parallel for simd /* { dg-error "'_Atomic' iteration variable 'i'" } */
for (i = 0; i < 64; i++)
;
#pragma omp for simd /* { dg-error "'_Atomic' iteration variable 'i'" } */
for (i = 0; i < 64; i++)
;
#pragma omp for /* { dg-error "'_Atomic' iteration variable 'j'" } */
for (_Atomic int j = 0; j < 64; j++)
;
#pragma omp parallel for /* { dg-error "'_Atomic' iteration variable 'j'" } */
for (_Atomic int j = 0; j < 64; j++)
;
#pragma omp simd /* { dg-error "'_Atomic' iteration variable 'j'" } */
for (_Atomic int j = 0; j < 64; j++)
;
#pragma omp parallel for simd /* { dg-error "'_Atomic' iteration variable 'j'" } */
for (_Atomic int j = 0; j < 64; j++)
;
#pragma omp for simd /* { dg-error "'_Atomic' iteration variable 'j'" } */
for (_Atomic int j = 0; j < 64; j++)
;
}
void
f2 (void)
{
_Atomic int i;
#pragma omp distribute /* { dg-error "'_Atomic' iteration variable 'i'" } */
for (i = 0; i < 64; i++)
;
#pragma omp distribute parallel for /* { dg-error "'_Atomic' iteration variable 'i'" } */
for (i = 0; i < 64; i++)
;
#pragma omp distribute parallel for simd /* { dg-error "'_Atomic' iteration variable 'i'" } */
for (i = 0; i < 64; i++)
;
#pragma omp distribute /* { dg-error "'_Atomic' iteration variable 'j'" } */
for (_Atomic int j = 0; j < 64; j++)
;
#pragma omp distribute parallel for /* { dg-error "'_Atomic' iteration variable 'j'" } */
for (_Atomic int j = 0; j < 64; j++)
;
#pragma omp distribute parallel for simd /* { dg-error "'_Atomic' iteration variable 'j'" } */
for (_Atomic int j = 0; j < 64; j++)
;
}
void
f3 (void)
{
int i;
_Atomic int j = 0;
#pragma omp simd linear(j:2) /* { dg-error "'_Atomic' 'j' in 'linear' clause" } */
for (i = 0; i < 64; i++)
j += 2;
#pragma omp parallel for linear(j:1) /* { dg-error "'_Atomic' 'j' in 'linear' clause" } */
for (i = 0; i < 64; i++)
j++;
}
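/* Illustrative addition, not part of the original test: the conforming
   pattern is a plain (non-_Atomic) iteration variable; _Atomic objects may
   still be accessed inside the loop body.  */
void
f4 (void)
{
  _Atomic int sum = 0;
  int i;
  #pragma omp parallel for
  for (i = 0; i < 64; i++)
    sum += 1;
}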
|
omp-taskloop-nogroup-single.c | #include <omp.h>
#include <unistd.h>
#include <stdio.h>
#define THREADS 6
#define LEN 4
int main(void)
{
int j=0;
#pragma omp parallel num_threads(THREADS)
{
#pragma omp single
{
#pragma omp taskloop nogroup
for (j=0; j<LEN; j++)
{
usleep(30);
}
#pragma omp taskwait
}
}
return 0;
}
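/* Note (explanatory sketch, not from the original source): "nogroup" removes
   the implicit taskgroup around the taskloop, so the single construct would
   otherwise be left before the generated tasks have finished; the explicit
   "taskwait" restores the synchronization by waiting for those child tasks. */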
|
rose_cancellation_point.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "libxomp.h"
struct OUT__1__10852___data
{
void *iend_p;
void *ist_p;
}
;
static void OUT__1__10852__(void *__out_argv);
void foo(int iend,int ist)
{
int i;
struct OUT__1__10852___data __out_argv1__10852__;
__out_argv1__10852__ . ist_p = ((void *)(&ist));
__out_argv1__10852__ . iend_p = ((void *)(&iend));
XOMP_parallel_start(OUT__1__10852__,&__out_argv1__10852__,1,0,"/home/awang15/Projects/rexdev/rex_src/tests/nonsmoke/functional/CompileTests/OpenMP_tests/cancellation_point.c",8);
XOMP_parallel_end("/home/awang15/Projects/rexdev/rex_src/tests/nonsmoke/functional/CompileTests/OpenMP_tests/cancellation_point.c",18);
}
static void OUT__1__10852__(void *__out_argv)
{
int *iend = (int *)(((struct OUT__1__10852___data *)__out_argv) -> iend_p);
int *ist = (int *)(((struct OUT__1__10852___data *)__out_argv) -> ist_p);
if (XOMP_single()) {
printf("Using %d threads.\n",(omp_get_num_threads()));
}
XOMP_barrier();
{
int _p_i;
long p_index_;
long p_lower_;
long p_upper_;
XOMP_loop_default( *iend, *ist,-1,&p_lower_,&p_upper_);
for (p_index_ = p_lower_; p_index_ >= p_upper_; p_index_ += -1) {
printf("Iteration %d is carried out by thread %d\n",p_index_,(omp_get_thread_num()));
}
}
#pragma omp cancellation point parallel
}
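/* Illustrative reconstruction (an assumption based on the XOMP calls above,
   not the verbatim input): the original OpenMP source was roughly
     #pragma omp parallel
     {
       #pragma omp single
         printf("Using %d threads.\n", omp_get_num_threads());
       #pragma omp for
       for (i = iend; i >= ist; i--)
         printf("Iteration %d is carried out by thread %d\n", i, omp_get_thread_num());
       #pragma omp cancellation point parallel
     }
*/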
|
filter.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <stdint.h>
#include <string.h>
#ifndef IMG_SIZE
#define IMG_SIZE 1024*32-2
#endif
#ifndef NUM_THREADS
#define NUM_THREADS 8
#endif
typedef struct {
unsigned int width;
unsigned int height;
unsigned int internalWidth;
unsigned int internalHeight;
uint8_t* img;
} image_t;
void imgDestroy(image_t* im) {
free(im->img);
free(im);
}
void printMatrix_d(double* m, int width, int height) {
int row, col;
for(row=0; row < height; row++) {
for(col=0; col < width; col++) {
printf("%.1f\t", m[(row*height)+col]);
}
printf("\n");
}
return;
}
void printMatrix(FILE* fp, image_t* m) {
int row, col;
for(row=0; row < m->internalHeight; row++) {
for(col=0; col < m->internalWidth; col++) {
fprintf(fp, "%u\t", m->img[(row*(m->internalHeight))+col]);
}
fprintf(fp, "\n");
}
return;
}
image_t* alloc_img(int width, int height) {
image_t* im;
im = malloc(sizeof(image_t));
im->width = width;
im->height = height;
// Extra rows and columns to make convolution a little easier.
// IMPORTANT: since I'm adding only 2 pixels to each dimension,
// this will only work for 3x3 kernels!!!
im->internalWidth = width+2;
im->internalHeight = height+2;
int size = (im->internalWidth) * (im->internalHeight) * sizeof(uint8_t);
im->img = calloc(1, size);
if (im->img == NULL) {
fprintf(stderr, "Could not allocate image (size %ux%u)\n", width, height);
exit(EXIT_FAILURE);
}
return im;
}
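// Note on indexing: the rest of this file uses internalHeight as the row
// stride (img[row*internalHeight + col]). That only coincides with the
// correct row-major stride, internalWidth, because every image created here
// is square; for non-square images the stride would have to be internalWidth.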
image_t* filter(image_t* im, double* K, int Ks, int divisor) {
image_t* oi;
oi = alloc_img(im->width, im->height);
#pragma omp parallel num_threads(NUM_THREADS)
{
float acc;
unsigned int irow, icol;
int krow, kcol;
int id = omp_get_thread_num();
int nthreads = omp_get_num_threads();
    for(irow=(id+1); irow <= im->height; irow = irow + nthreads) { // rows
      for(icol=1; icol <= im->width; icol++) { // columns
acc = 0;
for(krow=-Ks; krow <= Ks; ++krow) {
for(kcol=-Ks; kcol <= Ks; ++kcol) {
acc += im->img[((irow+krow)*(im->internalHeight))+(icol+kcol)] * K[((Ks+krow)*(2*Ks+1)) + (Ks+kcol)] / divisor;
}
}
        // clamp to [0, 255] before narrowing to uint8_t
        if (acc > 255.0f) acc = 255.0f;
        if (acc < 0.0f) acc = 0.0f;
        oi->img[(irow*(oi->internalHeight))+icol] = (uint8_t)acc;
}
}
}
return oi;
}
// sample kernels
int identityKernelDivisor = 1;
double identityKernel[3*3] = {0, 0, 0,
0, 1, 0,
0, 0, 0};
int sharpenDivisor = 1;
double sharpenKernel[3*3] = { 0, -1, 0,
-1, 5, -1,
0, -1, 0};
int edgeDetectionKernelDivisor = 1;
double edgeDetectionKernel[3*3] = {-1, -1, -1,
-1, 8, -1,
-1, -1, -1};
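// Illustrative extra kernel (an addition, not used by main): a 3x3 box blur,
// which averages each pixel with its 8 neighbours.
int boxBlurDivisor = 9;
double boxBlurKernel[3*3] = {1, 1, 1,
                             1, 1, 1,
                             1, 1, 1};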
image_t* genRandomImage(unsigned int size) {
image_t* im = alloc_img(size, size);
for(int row=1; row <= im->height; ++row) {
    for(int col=1; col <= im->width; ++col) {
im->img[(row*(im->internalHeight))+col] = rand() % 255;
}
}
return im;
}
int main(int argc, char** argv) {
srand(24);
printf("Generating image...\n");
image_t* original = genRandomImage(IMG_SIZE);
printf("Running convolution...\n");
printf("Kernel:\n");
printMatrix_d(sharpenKernel, 3, 3);
double start, end;
start = omp_get_wtime();
image_t* filtered = filter(original, sharpenKernel, 1, sharpenDivisor);
end = omp_get_wtime();
printf("%f\n", end-start);
FILE* ofp = fopen("original.dat", "w");
printMatrix(ofp, original);
fclose(ofp);
FILE* ffp = fopen("filtered.dat", "w");
printMatrix(ffp, filtered);
fclose(ffp);
imgDestroy(original);
imgDestroy(filtered);
return 0;
}
|
cpu_bound.c | /*
* Copyright (c) 2009, 2010, 2011, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
* ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
*/
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <omp.h>
#include <assert.h>
#define PERIOD 2500000000UL
#define ITERATIONS 10
#define STACK_SIZE (64 * 1024)
struct workcnt {
uint64_t cnt;
} __attribute__ ((aligned (64)));
static inline uint64_t rdtsc(void)
{
uint64_t eax, edx;
__asm volatile ("rdtsc" : "=a" (eax), "=d" (edx));
return (edx << 32) | eax;
}
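/* Note: the "=a"/"=d" constraints receive the two 32-bit halves RDTSC writes
   to EAX/EDX; on x86-64 the instruction zeroes the upper halves of RAX/RDX,
   so (edx << 32) | eax reassembles the full 64-bit timestamp counter. */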
int main(int argc, char *argv[])
{
static struct workcnt workcnt[32];
int nthreads;
if(argc == 2) {
nthreads = atoi(argv[1]);
bomp_bomp_init(nthreads);
omp_set_num_threads(nthreads);
} else {
assert(!"Specify number of threads");
}
uint64_t glast = rdtsc();
for(;;) {
// Do some work
#pragma omp parallel
{
uint64_t last = glast;
for(;;) {
workcnt[omp_get_thread_num()].cnt++;
if(rdtsc() >= last + PERIOD) {
break;
}
}
}
printf("%s: %lu: threads %d (%s), progress ", argv[0], rdtsc(), omp_get_num_threads(), omp_get_dynamic() ? "dynamic" : "static");
for(int n = 0; n < 32; n++) {
printf("%lu ", workcnt[n].cnt);
}
printf("\n");
fflush(stdout);
glast += PERIOD;
}
}
|
imd_colrad_nolb.c | #include "imd.h"
#include <sys/time.h>
#include <gsl/gsl_integration.h>
#include <gsl/gsl_errno.h>
// ***************************************************
// * TODO:
// * adapt mpi2 and mpi3 because the energies delE and kbTe are in eV
// * SPEEDUP: pragma omp also in ydot
// * floats where possible
// ****************************************************
#define USEFLOAT // mainly in the function genexpint. Profiling showed that
// most of the time is spent there -> float doubles the performance
//#define OMP
#define LAPACK
//#define MULTIPHOTON
// #define SPONT //<-- spontaneous emission, hardly any effect
//#define STARK //<-- reabsorption via stark effect
#define DOIPD //
#ifdef OMP
#include <omp.h>
#endif
#define TIMING //time-elapsed to stdout
#define MAXLEVEL 4 // up to which ionization level?
// #ifdef USEFLOAT //This attempt failed...
// #define EXPR(x) expf(x) //floats at the critical spots hinder
// #else //convergence of the solver
// #define EXPR(x) exp(x)
// #endif
#define EXPR(x) exp(x) //<-- NEVER turn this into expf!
#ifdef USEFLOAT
float fak(float t, float x, float j,float s); //aux. function for genexpint
float genexpint(float x,float ss,float j);
#else
double fak(double t, double x, double j,double s); //aux. function for genexpint
double genexpint(double x,double ss,double j);
#endif
double ExpInt(double x); //gsl has no float variant of expint
const int MAX_LINE_LENGTH=3000; //used by colrad_read;
//depends on how many states are considered
//and should really be computed dynamically
// *********************************************************
// PHYSICAL CONSTANTS
// *********************************************************
// const double eV2J=1.6021766E-19;
const double eV2H=0.03674932; //eV to Hartree
const double colrad_reltol=1e-6;
const double colrad_abstol=10.0;
// const double J2eV=6.2415091E18;
const double planck=6.62607004E-34; // J/s
const double bohr_radius=0.52917721067E-10; // m
const double bohr_radius_sq=2.800285202924816e-21;
const double hbar_cub=1.172812163789953e-102; //hbar^3
const double double_emass_pow_3_2 = 2.459112949719466e-45; // (2*emass)^3/2
const int MAXLINE = 255;
const double pi=3.141592653589793;
const double pi_sq=9.869604401089358;
const double E_ion_H=13.6; // eV
const double E_ion_H_J=2.178960176000000e-18; // J
const double E_ion_H_sq_J=4.747867448593952e-36;
const double colrad_tequi=1e-6;//TEST// 1e-12; //during the initial equilibration, first equilibrate the
//Saha population densities without varying the temperature
//const double LIGHTSPEED=2.997925458e8; // m/s
double HBAR;
double LASERFREQ;
int num_threads;
//const double EMASS=9.10938356e-31; // kg
//const double ECONST=8.854187817e-12; // As/Vm
//const double BOLTZMAN=1.38064852e-23; // J/K
//const double ECHARGE=1.60217662e-19; // C
//const double AMU=1.66053904020e-27; // atomic mass unit
// ******************************************************************************
// * CROSS SECTION INTEGRATION STUFF
// ******************************************************************************
gsl_integration_workspace * winteg_inner=NULL;
gsl_integration_workspace * winteg_outer=NULL;
gsl_integration_workspace * winteg_fermi=NULL;
gsl_integration_workspace * winteg_exc=NULL; //excitation
struct my_f_params { double ne; double T;double mu; double E;double DeltaE; int allowed;};
struct my_f_params fparams_inner; //For inner integrand
struct my_f_params fparams_outer; //outer integrand
struct my_f_params fparams_fermi;
struct my_f_params fparams_exc;
double inner_integrand_ionization(double x, void *p); // integrate along E'
double outer_integrand_ionization(double x,void *p); // integrate along E
double double_integral_ionization(double ne,double T, double mu, double DeltaE); //evaluates double integral
double inner_integrand_recombination(double x, void *p);
double outer_integrand_recombination(double x,void *p);
double double_integral_recombination(double ne,double T, double mu, double DeltaE);
double integrand_excitation(double x,void *p);
double eval_excitation_integral(double ne,double T,double mu, double DeltaE, int allowed);
double eval_dexcitation_integral(double ne,double T,double mu, double DeltaE, int allowed);
double integrand_deexcitation(double x,void *p);
double fermi_integrand(double x, void *p);
double eval_fermi_integrand(double ne,double T, double mu);
const double integ_reltol = 1e-5;
const double integ_abstol = 1e-20;
const int integ_meshdim =2500;
#define muINF (20*mu)
#define MINRATE 1e16 //if the estimated max. ionization rate is below this --> skip computing the integrals entirely
#define MINCONC 1e-60 //concentrations below this are ignored in the Saha init
double k_EE_MAX, k_EI_MAX, k_EE_REV_MAX, k_EI_REV_MAX; //DEBUG PURPOSE
// *********************************************************
// CVODE-STRUCT FOR SOLVER PARAMS
// *********************************************************
typedef struct {
realtype It; //Intensity
realtype IPD0,IPD1,IPD2,IPD3,IPD4;
double EF;
realtype P_EE,P_EI,P_MPI2,P_MPI3,P_RR;
double P_TOTAL; //total colrad power density, for the energy file
double dens; //because cv = F(dens,temp)
double ni; //for Z=ne/ni
bool initial_equi;
double Tinit; //initial Temp. during equi must not change!
} *colrad_UserData;
colrad_UserData cdata;
// *********************************************************
// MAIN
// *********************************************************
void do_colrad(double dt)
{
k_EE_MAX= k_EI_MAX= k_EE_REV_MAX= k_EI_REV_MAX=0.0;
int flag;
double t;
double tout=dt;
int i,j,k;
N_Vector y;
double Te0,Ti0,rho0,ni0,ne0;
colrad_ptotal=0.0;
if(myid==0 && cdata->initial_equi)
printf("COLRAD performs pre-equilibration of Saha-distribution\nfor t=%.4e s...This may take some time.\n",colrad_tequi);
#ifdef TIMING
//if(myid==0)
// {
struct timeval start, end;
gettimeofday(&start, NULL);
// }
#endif
for(i=1;i<local_fd_dim.x-1;i++)
{
for(j=1;j<local_fd_dim.y-1;j++)
{
for(k=1;k<local_fd_dim.z-1;k++)
{
if(l1[i][j][k].natoms < fd_min_atoms) continue;
y=l1[i][j][k].y;
Te0=l1[i][j][k].temp*11604.5;
Ti0=l1[i][j][k].md_temp*11604.5;
rho0=l1[i][j][k].dens;
ni0=rho0/AMU/26.9185; //1e28; //1e26/m^3 corresponds to roughly 1e-4/Angstrom^3
if(cdata->initial_equi==true)
{
double Zmean=MeanCharge(Te0, rho0, atomic_charge, atomic_weight,i,j,k);
ne0= Zmean* l1[i][j][k].dens / (atomic_weight * AMU);
l1[i][j][k].ne=ne0; //the Saha init reads this
colrad_Saha_init(i, j, k);
cdata->Tinit=Te0;
}
else //NORMAL
{
ne0=l1[i][j][k].ne;
}
Ith(y,0)=Te0;
Ith(y,1)=Ti0;
Ith(y,2)=ne0;
flag = CVodeReInit(cvode_mem, 0.0, y);
if(cdata->initial_equi==true)
{
// printf("myid:%d, RUNNNING INITIAL EQUI\n",myid);
flag = CVode(cvode_mem, colrad_tequi, y, &t, CV_NORMAL);
int i_global,j_global,k_global;
i_global = ((i - 1) + my_coord.x * (local_fd_dim.x - 2));
j_global = ((j - 1) + my_coord.y * (local_fd_dim.y - 2));
k_global = ((k-1) + my_coord.z*(local_fd_dim.z-2));
long int nje;
long int nfe;
long int nsetups;
long int nni;
long int nst;
long int ncfn;
long int netf;
CVodeGetNumJacEvals(cvode_mem, &nje);
CVodeGetNumRhsEvals(cvode_mem, &nfe);
CVodeGetNumLinSolvSetups(cvode_mem, &nsetups);
CVodeGetNumNonlinSolvIters(cvode_mem, &nni);
CVodeGetNumSteps(cvode_mem, &nst);
CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn);
CVodeGetNumErrTestFails(cvode_mem, &netf);
printf("myid:%d, COLRAD Cell %d was equilibrated, nfe:%ld, nje:%ld, nsetups:%ld, nni:%ld,nst:%ld,ncfn:%ld,netf:%ld\n",
myid,i_global, nfe,nje,nsetups,nni,nst,ncfn,netf);
} //initial equi
else //NORMAL
{
cdata->dens=l1[i][j][k].dens;
flag = CVode(cvode_mem, tout, y, &t, CV_NORMAL);
colrad_ptotal+=(fd_vol)*1e-30*cdata->P_TOTAL; // i.e. ptotal is the total power
int i_global;
i_global = ((i - 1) + my_coord.x * (local_fd_dim.x - 2));
long int nje;
long int nfe;
long int nsetups;
long int nni;
long int nst;
long int ncfn;
long int netf;
CVodeGetNumJacEvals(cvode_mem, &nje);
CVodeGetNumRhsEvals(cvode_mem, &nfe);
CVodeGetNumLinSolvSetups(cvode_mem, &nsetups);
CVodeGetNumNonlinSolvIters(cvode_mem, &nni);
CVodeGetNumSteps(cvode_mem, &nst);
CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn);
CVodeGetNumErrTestFails(cvode_mem, &netf);
printf("myid:%d, COLRAD Cell %d step done, nfe:%ld, nje:%ld, nsetups:%ld, nni:%ld,nst:%ld,ncfn:%ld,netf:%ld\n",
myid,i_global, nfe,nje,nsetups,nni,nst,ncfn,netf);
//ni0=cdata->ni;
}
//REASSIGN NEW TE AND NE
l1[i][j][k].temp=Ith(y,0)/11604.5;
l1[i][j][k].ne=Ith(y,2);
l1[i][j][k].Z=l1[i][j][k].ne/ni0;
l1[i][j][k].P_EE=cdata->P_EE;
l1[i][j][k].P_EI=cdata->P_EI;
l1[i][j][k].P_MPI2=cdata->P_MPI2;
l1[i][j][k].P_MPI3=cdata->P_MPI3;
l1[i][j][k].P_RR=cdata->P_RR;
l2[i][j][k].temp=l1[i][j][k].temp; //also store in l2! Important!
l2[i][j][k].ne=l1[i][j][k].ne; //otherwise everything is lost in the diffusion step
l2[i][j][k].Z=l1[i][j][k].Z; //before the ttm writeout!
l2[i][j][k].P_EE=l1[i][j][k].P_EE;
l2[i][j][k].P_EI=l1[i][j][k].P_EI;
l2[i][j][k].P_MPI2=l1[i][j][k].P_MPI2;
l2[i][j][k].P_MPI3=l1[i][j][k].P_MPI3;
l2[i][j][k].P_RR=l1[i][j][k].P_RR;
if(l1[i][j][k].temp <0 || isnan(l1[i][j][k].temp) !=0 )
{
char errstr[255];
sprintf(errstr,"ERROR in COLRAD: Te became Nan or <0\n");
error(errstr);
}
if(l1[i][j][k].ne <0 || isnan(l1[i][j][k].ne) !=0 )
{
char errstr[255];
sprintf(errstr,"ERROR in COLRAD: ne became Nan or <0\n");
error(errstr);
}
}
}
}
if(cdata->initial_equi==true)
{
colrad_write(0);
cdata->initial_equi=false;
if(myid==0)
printf("Initial equi done\n");
}
else if(steps % ttm_int ==0)
{
// colrad_write(steps);
}
#ifdef TIMING
MPI_Barrier(cpugrid);
// if(myid==0)
// {
gettimeofday(&end, NULL);
double delta = ((end.tv_sec - start.tv_sec) * 1000000u +
end.tv_usec - start.tv_usec) / 1.e6;
// if(myid==0)
printf("myid:%d, telaps:%f, COLRAD STEP DONE, kee:%.4e,keerev:%.4e, kei:%.4e, keirev:%.4e\n",
myid,delta, k_EE_MAX, k_EE_REV_MAX, k_EI_MAX, k_EI_REV_MAX);
// }
#endif
}
// *********************************************************
// INIT FUNC
// *********************************************************
void colrad_init(void)
{
//Init gsl-integration stuff
winteg_inner= gsl_integration_workspace_alloc (integ_meshdim); //Integration workspace allocation
winteg_outer= gsl_integration_workspace_alloc (integ_meshdim); //Integration workspace allocation
winteg_fermi= gsl_integration_workspace_alloc (integ_meshdim); //Integration workspace allocation
winteg_exc= gsl_integration_workspace_alloc (integ_meshdim);
HBAR=planck/2.0/pi;
LASERFREQ=LIGHTSPEED/lambda;
SUNMatrix A;
N_Vector abstol;
int i,j,k;
if(myid==0)
{
printf("*****************************************\n");
printf("* COLLISIONAL RADIATIVE MODEL *\n");
printf("*****************************************\n");
printf(" READING ENERGY LEVELS \n");
printf(" reltol:%.4e\n",colrad_reltol);
printf(" abstol:%.4e\n",colrad_abstol);
}
colrad_read_states();
if(myid==0)
{
printf(" Nr. of Equations:%d *\n",total_species+3);
}
MPI_Barrier(cpugrid);
#ifdef OMP
#pragma omp parallel
{
#pragma omp single
{
num_threads=omp_get_num_threads();
printf("myid:%d, omp threads:%d\n",myid,num_threads);
}
}
#endif
if(myid==0)
{
printf("*****************************************\n");
}
cdata=(colrad_UserData) malloc(sizeof *cdata);
cdata->initial_equi=true;
cdata->P_TOTAL=0.0;
//total_species=z0_len+z1_len; //wird bereits in read-states reduced
neq=total_species+3;
for(i=1;i<local_fd_dim.x-1;i++)
{
for(j=1;j<local_fd_dim.y-1;j++)
{
for(k=1;k<local_fd_dim.z-1;k++)
{
l1[i][j][k].y=N_VNew_Serial(neq);
l1[i][j][k].P_EE=0.0;
l1[i][j][k].P_EI=0.0;
l1[i][j][k].P_MPI2=0.0;
l1[i][j][k].P_MPI3=0.0;
l1[i][j][k].P_RR=0.0;
//l2... <-- only needs to be allocated once,
//but in the DIFF loop make sure that after swapping,
//l2.y always points to l1.y and not into the void
}
}
}
abstol = N_VNew_Serial(neq);
N_Vector Vdummy=N_VNew_Serial(neq); //replaced at re-init
// ************************* //
// * TOLERANCES *
// *************************
for(i=0;i<neq;i++)
{
Ith(abstol,i) = colrad_abstol;//fmax(Ith(colrad_y,i)*1e-6,10.0);
}
Ith(abstol,0)=5.0; //Temp
// ************************* //
// * SOLVER & CVODE INIT *
// *************************
cvode_mem=NULL;
cvode_mem = CVodeCreate(CV_BDF);
CVodeInit(cvode_mem, colrad_ydot, 0, Vdummy);
CVodeSetUserData(cvode_mem, cdata);
CVodeSVtolerances(cvode_mem, colrad_reltol, abstol);
SUNLinearSolver LS;
A=SUNDenseMatrix(neq, neq);
#ifdef LAPACK
LS = SUNLinSol_LapackDense(Vdummy, A);
#else
LS=SUNLinSol_Dense(Vdummy, A);
#endif
CVodeSetLinearSolver(cvode_mem, LS, A);
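// Note: CV_BDF with a dense direct solver is the standard CVODE configuration
// for stiff systems such as this rate-equation network. Since no Jacobian
// routine is attached via CVodeSetJacFn, CVODE falls back to its internal
// difference-quotient approximation of the dense Jacobian.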
//////////////////////////////////////////////////////////////////////
// OPTIONAL FUNCTIONS: SEE P. 42, TABLE 4.2
//////////////////////////////////////////////////////////////////////
//CVodeSetIterType(cvode_mem,CV_NEWTON); //already set by default anyway
//Setting mindt is not a good idea
//CVodeSetMaxOrd(cvode_mem,12); //for Adams, default=12
//CVodeSetMaxErrTestFails(cvode_mem,7); //Default=7
//CVodeSetMaxNonlinIters(cvode_mem,3); //Default=3
//CVodeSetMaxConvFails(cvode_mem,10); //Default=10
//
//CVodeSetMaxStep(cvode_mem,1e-15);
//CVodeSetNonlinConvCoef(cvode_mem,0.01); //Default=0.1
//Max steps must be very high for large time steps,
//otherwise Newton complains
CVodeSetMaxNumSteps(cvode_mem,1500); //Default=500
// CVodeSetInitStep(cvode_mem,1e-17); //CAUTION: if too small --> nonsense
//CVodeSetEpsLin(cvode_mem,0.01); //Default=0.05;
CVodeSetMaxStepsBetweenJac(cvode_mem,50); //default 50 --> good performance boost
//N_VDestroy(dummy);
/*
N_VDestroy(y);
SUNMatDestroy(A);
SUNLinSolFree(LS);
CVodeFree(&cvode_mem);
*/
//colrad_read(0);
}
void colrad_Saha_init(int i,int j,int k)
{
// int i,j,k;
// for(i=1;i<local_fd_dim.x-1;i++)
// {
// for(j=1;i<local_fd_dim.y-1;j++)
// {
// for(k=1;i<local_fd_dim.z-1;k++)
// {
double Te0,Ti0,ne0,ni0,rho0;
N_Vector y;
y=l1[i][j][k].y;
Te0=l1[i][j][k].temp*11604.5;
Ti0=l1[i][j][k].md_temp*11604.5;
rho0=l1[i][j][k].dens; ///1e10;
ni0=rho0/AMU/26.9185; //1e28; //1e26/m^3 entspricht etwa 1e-4/Angtrom^3
ne0=l1[i][j][k].ne;
Ith(y,0)=Te0;
Ith(y,1)=Ti0;
Ith(y,2)=ne0;
do_Saha(Te0,ni0,ne0,y);
// }
// }
// }
}
void colrad_read_states(void)
{
// **************************************************************************
// * READ STATES FILES
// **********************************************************************
int lcnt = 1;
int i, j;
double *buf; //buffer 1d array for communication
FILE* fin = NULL;
char line[255];
if (myid == 0)
{
fin = fopen("Al0_states.txt", "r");
if (fin == NULL)
{
char errstr[255];
sprintf(errstr, "ERROR in colrad_read_states: File %s not found\n", "Al0_states.txt");
error(errstr);
}
while (1) {
if (fgets(line, MAXLINE, fin) == NULL) break;
lcnt++;
}
alloc2darr(double, STATES_z0, lcnt, 6);
lcnt = 0;
rewind(fin);
while (1)
{
if (fgets(line, MAXLINE, fin) == NULL) break;
sscanf(line, "%lf\t%lf\t%lf\t%lf\t%lf\t%lf",
&STATES_z0[lcnt][0], &STATES_z0[lcnt][1], &STATES_z0[lcnt][2],
&STATES_z0[lcnt][3], &STATES_z0[lcnt][4], &STATES_z0[lcnt][5]);
lcnt++;
}
z0_len = lcnt;
fclose(fin);
total_species += z0_len;
}
//NOW COMMUNICATE
MPI_Bcast(&z0_len, 1, MPI_INT, 0, cpugrid);
alloc1darr(double, buf, z0_len * 6);
MPI_Barrier(cpugrid);
if (myid == 0)
{
for (i = 0; i < z0_len * 6; i += 6) //fill 1d buff-array
{
buf[i] = (double) STATES_z0[i / 6][0];
buf[i + 1] = (double) STATES_z0[i / 6][1];
buf[i + 2] = (double) STATES_z0[i / 6][2];
buf[i + 3] = (double) STATES_z0[i / 6][3];
buf[i + 4] = (double) STATES_z0[i / 6][4];
buf[i + 5] = (double) STATES_z0[i / 6][5];
//printf("E:%.4e,i/6:%d\n",STATES_z0[i/6][2], i/6);
}
}
MPI_Barrier(cpugrid);
MPI_Bcast(buf, z0_len * 6, MPI_DOUBLE, 0, cpugrid);
//NOW RECONSTRUCT on other procs
if (myid > 0)
{
alloc2darr(double, STATES_z0, z0_len, 6);
for (i = 0; i < z0_len * 6; i += 6)
{
STATES_z0[i / 6][0] = buf[i];
STATES_z0[i / 6][1] = buf[i + 1];
STATES_z0[i / 6][2] = buf[i + 2];
STATES_z0[i / 6][3] = buf[i + 3];
STATES_z0[i / 6][4] = buf[i + 4];
STATES_z0[i / 6][5] = buf[i + 5];
}
}
free(buf);
// ***********************************************
//Read Al, Z=+1
// **********************************************
if (myid == 0)
{
lcnt = 1;
fin = fopen("Al1_states.txt", "r");
if (fin == NULL)
{
char errstr[255];
sprintf(errstr, "ERROR in colrad_read_states: File %s not found\n", "Al1_states.txt");
error(errstr);
}
while (1) {
if (fgets(line, MAXLINE, fin) == NULL) break;
lcnt++;
}
alloc2darr(double, STATES_z1, lcnt, 6);
lcnt = 0;
rewind(fin);
while (1)
{
if (fgets(line, MAXLINE, fin) == NULL) break;
sscanf(line, "%lf\t%lf\t%lf\t%lf\t%lf\t%lf",
&STATES_z1[lcnt][0], &STATES_z1[lcnt][1], &STATES_z1[lcnt][2],
&STATES_z1[lcnt][3], &STATES_z1[lcnt][4], &STATES_z1[lcnt][5]);
lcnt++;
}
z1_len = lcnt;
fclose(fin);
total_species += z1_len;
}
//NOW COMMUNICATE
MPI_Bcast(&z1_len, 1, MPI_INT, 0, cpugrid);
alloc1darr(double, buf, z1_len * 6);
if (myid == 0)
{
for (i = 0; i < z1_len * 6; i += 6) //fill 1d buff-array
{
buf[i] = (double) STATES_z1[i / 6][0];
buf[i + 1] = (double) STATES_z1[i / 6][1];
buf[i + 2] = (double) STATES_z1[i / 6][2];
buf[i + 3] = (double) STATES_z1[i / 6][3];
buf[i + 4] = (double) STATES_z1[i / 6][4];
buf[i + 5] = (double) STATES_z1[i / 6][5];
}
}
MPI_Bcast(buf, z1_len * 6, MPI_DOUBLE, 0, cpugrid);
//NOW RECONSTRUCT on other procs
if (myid > 0)
{
alloc2darr(double, STATES_z1, z1_len, 6);
for (i = 0; i < z1_len * 6; i += 6)
{
STATES_z1[i / 6][0] = buf[i];
STATES_z1[i / 6][1] = buf[i + 1];
STATES_z1[i / 6][2] = buf[i + 2];
STATES_z1[i / 6][3] = buf[i + 3];
STATES_z1[i / 6][4] = buf[i + 4];
STATES_z1[i / 6][5] = buf[i + 5];
}
}
free(buf);
// ***********************************************
//Read Al, Z=+2
// **********************************************
#if MAXLEVEL > 1
if (myid == 0)
{
lcnt = 1;
fin = fopen("Al2_states.txt", "r");
if (fin == NULL)
{
char errstr[255];
sprintf(errstr, "ERROR in colrad_read_states: File %s not found\n", "Al2_states.txt");
error(errstr);
}
while (1) {
if (fgets(line, MAXLINE, fin) == NULL) break;
lcnt++;
}
alloc2darr(double, STATES_z2, lcnt, 6);
lcnt = 0;
rewind(fin);
while (1)
{
if (fgets(line, MAXLINE, fin) == NULL) break;
sscanf(line, "%lf\t%lf\t%lf\t%lf\t%lf\t%lf",
&STATES_z2[lcnt][0], &STATES_z2[lcnt][1], &STATES_z2[lcnt][2],
&STATES_z2[lcnt][3], &STATES_z2[lcnt][4], &STATES_z2[lcnt][5]);
lcnt++;
}
z2_len = lcnt;
fclose(fin);
total_species += z2_len;
}
//NOW COMMUNICATE
MPI_Bcast(&z2_len, 1, MPI_INT, 0, cpugrid);
alloc1darr(double, buf, z2_len * 6);
if (myid == 0)
{
for (i = 0; i < z2_len * 6; i += 6) //fill 1d buff-array
{
buf[i] = (double) STATES_z2[i / 6][0];
buf[i + 1] = (double) STATES_z2[i / 6][1];
buf[i + 2] = (double) STATES_z2[i / 6][2];
buf[i + 3] = (double) STATES_z2[i / 6][3];
buf[i + 4] = (double) STATES_z2[i / 6][4];
buf[i + 5] = (double) STATES_z2[i / 6][5];
}
}
MPI_Bcast(buf, z2_len * 6, MPI_DOUBLE, 0, cpugrid);
//NOW RECONSTRUCT on other procs
if (myid > 0)
{
alloc2darr(double, STATES_z2, z2_len, 6);
for (i = 0; i < z2_len * 6; i += 6)
{
STATES_z2[i / 6][0] = buf[i];
STATES_z2[i / 6][1] = buf[i + 1];
STATES_z2[i / 6][2] = buf[i + 2];
STATES_z2[i / 6][3] = buf[i + 3];
STATES_z2[i / 6][4] = buf[i + 4];
STATES_z2[i / 6][5] = buf[i + 5];
}
}
free(buf);
#endif //MAXLEVEL > 1
// ***********************************************
//Read Al, Z=+3
// **********************************************
#if MAXLEVEL > 2
if (myid == 0)
{
lcnt = 1;
fin = fopen("Al3_states.txt", "r");
if (fin == NULL)
{
char errstr[255];
sprintf(errstr, "ERROR in colrad_read_states: File %s not found\n", "Al3_states.txt");
error(errstr);
}
while (1) {
if (fgets(line, MAXLINE, fin) == NULL) break;
lcnt++;
}
alloc2darr(double, STATES_z3, lcnt, 6);
lcnt = 0;
rewind(fin);
while (1)
{
if (fgets(line, MAXLINE, fin) == NULL) break;
sscanf(line, "%lf\t%lf\t%lf\t%lf\t%lf\t%lf",
&STATES_z3[lcnt][0], &STATES_z3[lcnt][1], &STATES_z3[lcnt][2],
&STATES_z3[lcnt][3], &STATES_z3[lcnt][4], &STATES_z3[lcnt][5]);
lcnt++;
}
z3_len = lcnt;
fclose(fin);
total_species += z3_len;
}
//NOW COMMUNICATE
MPI_Bcast(&z3_len, 1, MPI_INT, 0, cpugrid);
alloc1darr(double, buf, z3_len * 6);
if (myid == 0)
{
for (i = 0; i < z3_len * 6; i += 6) //fill 1d buff-array
{
buf[i] = (double) STATES_z3[i / 6][0];
buf[i + 1] = (double) STATES_z3[i / 6][1];
buf[i + 2] = (double) STATES_z3[i / 6][2];
buf[i + 3] = (double) STATES_z3[i / 6][3];
buf[i + 4] = (double) STATES_z3[i / 6][4];
buf[i + 5] = (double) STATES_z3[i / 6][5];
}
}
MPI_Bcast(buf, z3_len * 6, MPI_DOUBLE, 0, cpugrid);
//NOW RECONSTRUCT on other procs
if (myid > 0)
{
alloc2darr(double, STATES_z3, z3_len, 6);
for (i = 0; i < z3_len * 6; i += 6)
{
STATES_z3[i / 6][0] = buf[i];
STATES_z3[i / 6][1] = buf[i + 1];
STATES_z3[i / 6][2] = buf[i + 2];
STATES_z3[i / 6][3] = buf[i + 3];
STATES_z3[i / 6][4] = buf[i + 4];
STATES_z3[i / 6][5] = buf[i + 5];
}
}
free(buf);
#endif //MAXLEVEL > 2
// ***********************************************
//Read Al, Z=+4
// **********************************************
#if MAXLEVEL > 3
if (myid == 0)
{
lcnt = 1;
fin = fopen("Al4_states.txt", "r");
if (fin == NULL)
{
char errstr[255];
sprintf(errstr, "ERROR in colrad_read_states: File %s not found\n", "Al4_states.txt");
error(errstr);
}
while (1) {
if (fgets(line, MAXLINE, fin) == NULL) break;
lcnt++;
}
alloc2darr(double, STATES_z4, lcnt, 6);
lcnt = 0;
rewind(fin);
while (1)
{
if (fgets(line, MAXLINE, fin) == NULL) break;
sscanf(line, "%lf\t%lf\t%lf\t%lf\t%lf\t%lf",
&STATES_z4[lcnt][0], &STATES_z4[lcnt][1], &STATES_z4[lcnt][2],
&STATES_z4[lcnt][3], &STATES_z4[lcnt][4], &STATES_z4[lcnt][5]);
lcnt++;
}
z4_len = lcnt;
fclose(fin);
total_species += z4_len;
}
//NOW COMMUNICATE
MPI_Bcast(&z4_len, 1, MPI_INT, 0, cpugrid);
alloc1darr(double, buf, z4_len * 6);
if (myid == 0)
{
for (i = 0; i < z4_len * 6; i += 6) //fill 1d buff-array
{
buf[i] = (double) STATES_z4[i / 6][0];
buf[i + 1] = (double) STATES_z4[i / 6][1];
buf[i + 2] = (double) STATES_z4[i / 6][2];
buf[i + 3] = (double) STATES_z4[i / 6][3];
buf[i + 4] = (double) STATES_z4[i / 6][4];
buf[i + 5] = (double) STATES_z4[i / 6][5];
}
}
MPI_Bcast(buf, z4_len * 6, MPI_DOUBLE, 0, cpugrid);
//NOW RECONSTRUCT on other procs
if (myid > 0)
{
alloc2darr(double, STATES_z4, z4_len, 6);
for (i = 0; i < z4_len * 6; i += 6)
{
STATES_z4[i / 6][0] = buf[i];
STATES_z4[i / 6][1] = buf[i + 1];
STATES_z4[i / 6][2] = buf[i + 2];
STATES_z4[i / 6][3] = buf[i + 3];
STATES_z4[i / 6][4] = buf[i + 4];
STATES_z4[i / 6][5] = buf[i + 5];
}
}
free(buf);
#endif //MAXLEVEL > 3
// **********************************
// * ALLOC ARRAYS
// **********************************
alloc2darr(double, k_EE_z0_z0, z0_len, z0_len);
alloc2darr(double, k_EE_z0_z0_b, z0_len, z0_len);
alloc2darr(double, k_EE_z1_z1, z1_len, z1_len);
alloc2darr(double, k_EE_z1_z1_b, z1_len, z1_len);
#if MAXLEVEL > 1
alloc2darr(double, k_EE_z2_z2, z2_len, z2_len);
alloc2darr(double, k_EE_z2_z2_b, z2_len, z2_len);
#endif
#if MAXLEVEL > 2
alloc2darr(double, k_EE_z3_z3, z3_len, z3_len);
alloc2darr(double, k_EE_z3_z3_b, z3_len, z3_len);
#endif
#if MAXLEVEL > 3
alloc2darr(double, k_EE_z4_z4, z4_len, z4_len);
alloc2darr(double, k_EE_z4_z4_b, z4_len, z4_len);
#endif
// **********************************************
// Now Thermal Ionization and Recomb. rate coeff. arrays
// z0->z1
alloc2darr(double, k_EI_z0_z1, z0_len, z1_len);
alloc2darr(double, k_EI_z1_z0, z0_len, z1_len);
#if MAXLEVEL > 1
//z1->z2
alloc2darr(double, k_EI_z1_z2, z1_len, z2_len);
alloc2darr(double, k_EI_z2_z1, z1_len, z2_len);
#endif
#if MAXLEVEL > 2
//z2->z3
alloc2darr(double, k_EI_z2_z3, z2_len, z3_len);
alloc2darr(double, k_EI_z3_z2, z2_len, z3_len);
#endif
#if MAXLEVEL > 3
//z3->z4
alloc2darr(double, k_EI_z3_z4, z3_len, z4_len);
alloc2darr(double, k_EI_z4_z3, z3_len, z4_len);
#endif
// k_MPI arrays
//z0->z1
alloc3darr(double, k_MPI_z0_z1, z0_len, z1_len, 2);
alloc3darr(double, k_MPI_z1_z0, z0_len, z1_len, 2);
//z1->z2
#if MAXLEVEL > 1
alloc3darr(double, k_MPI_z1_z2, z1_len, z2_len, 2);
alloc3darr(double, k_MPI_z2_z1, z1_len, z2_len, 2);
#endif
//z2->z3
#if MAXLEVEL > 2
alloc3darr(double, k_MPI_z2_z3, z2_len, z3_len, 2);
alloc3darr(double, k_MPI_z3_z2, z2_len, z3_len, 2);
#endif
//z3->z4
#if MAXLEVEL > 3
alloc3darr(double, k_MPI_z3_z4, z3_len, z4_len, 2);
alloc3darr(double, k_MPI_z4_z3, z3_len, z4_len, 2);
#endif
MPI_Bcast(&total_species, 1, MPI_INT, 0, cpugrid);
}
void do_Saha(double Te,double totalc,double ne,N_Vector y) //at init
{
double tmp;
double Zav=ne/totalc;
// double Ti=Te;
double Q_z0,Q_z1,Q_z2,Q_z3,Q_z4,Q_z5; //partition functions
// double IPD_SP=0.0; //Stewart and Pyatt =(pow(1.0+a/Debye,2.0/3.0)-1.0)/2.0/(Zav+1.0);
// double IPD_IS=0.0; //Ion-sphere model (high dens,low T) =3.0*1.0*ECHARGE*ECHARGE/2.0/a/BOLTZMAN/Te;
// double IPD_DH=0.0; //Debye-Hückel (high T,low dens) = 1.0*ECHARGE*ECHARGE/Debye/BOLTZMAN/Te;
double r10,r21,r32,r43,r54; //ratios of ion. conc.
double DeltaE;
double IPD=0.0;
double n0,n1,n2,n3,n4,n5;
double p; //probability
int i;
r10=r21=r32=r43=r54=0;
Zav=ne/totalc;
double EF=fermi_E(ne);
double mu=chempot(ne,Te);
//Debye=sqrt(BOLTZMAN*Te/4.0/pi/ECHARGE/ECHARGE/ne/(Zav+1));
tmp=pow(2.0*pi*EMASS*BOLTZMAN*Te,1.5)/planck/planck/planck; //NOTE: this is again the (inverse cubed) thermal de Broglie wavelength
//Use the proper chemical potential!!!!
#ifdef DOIPD
double IPD0,IPD1,IPD2,IPD3,IPD4;
double z; //degree of ionization after the ionization event (e.g. =1 for Al0)
double r0; //Ion sphere radius
r0=pow(3.0/4.0/pi/totalc,1.0/3.0);
double debye=sqrt(BOLTZMAN*Te/4.0/pi/pow(totalc+ne,2.0));
// Atoms, solids, and plasmas in super-intense laser fields, p. 220
IPD0=1.0*3.0/2.0/r0*ECHARGE*ECHARGE*(pow(1.0+pow(debye/r0,3.0),2.0/3.0)-pow(debye/r0,2.0))/4.0/pi/ECONST;
IPD1=2.0*3.0/2.0/r0*ECHARGE*ECHARGE*(pow(1.0+pow(debye/r0,3.0),2.0/3.0)-pow(debye/r0,2.0))/4.0/pi/ECONST;
IPD2=3.0*3.0/2.0/r0*ECHARGE*ECHARGE*(pow(1.0+pow(debye/r0,3.0),2.0/3.0)-pow(debye/r0,2.0))/4.0/pi/ECONST;
IPD3=4.0*3.0/2.0/r0*ECHARGE*ECHARGE*(pow(1.0+pow(debye/r0,3.0),2.0/3.0)-pow(debye/r0,2.0))/4.0/pi/ECONST;
IPD4=5.0*3.0/2.0/r0*ECHARGE*ECHARGE*(pow(1.0+pow(debye/r0,3.0),2.0/3.0)-pow(debye/r0,2.0))/4.0/pi/ECONST;
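/* The five lines above implement the ionization potential depression
     IPD_z = (z+1) * 3/(2*r0) * e^2/(4*pi*eps0)
             * [ (1 + (debye/r0)^3)^(2/3) - (debye/r0)^2 ],
   a Stewart-Pyatt-type interpolation between the ion-sphere (dense/cold)
   and Debye-Hueckel (hot/dilute) limits, evaluated for the charge state
   (z+1) left behind after ionization. */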
#endif
//compute partition functions
Q_z0=0.0;
Q_z1=0.0;
Q_z2=0.0;
Q_z3=0.0;
Q_z4=0.0;
Q_z5=0.0;
for(i=0;i<z0_len;++i)
{
#ifdef DOIPD
IPD=IPD0;
#endif
double Ei=STATES_z0[i][2]*eV2J-IPD+mu;
double qi=STATES_z0[i][3]*exp(-(STATES_z0[i][2]-0.0)*eV2J/BOLTZMAN/Te);
if(Ei<0) //depressed state -> truncate partition function
qi=0;
Q_z0+=qi; //with energies relative to the ground state (level=0) of this ion
// if(myid==0)
// printf("Z0, qi:%.4e, exp:%.4e, gi:%.4e,Q:%.4e\n", qi,
// exp(-(STATES_z0[i][2]-0.0)*eV2J/BOLTZMAN/Te),
// STATES_z0[i][3], Q_z0);
}
for(i=0;i<z1_len;++i)
{
#ifdef DOIPD
IPD=IPD0;
#endif
double Ei=STATES_z1[i][2]*eV2J-IPD+mu;
double qi=STATES_z1[i][3]*exp(-(STATES_z1[i][2]-STATES_z1[0][2])*eV2J/BOLTZMAN/Te);
if(Ei<0) qi=0;
Q_z1+=qi;
// if(myid==0)
// printf("Z1, qi:%.4e, exp:%.4e, gi:%.4e,Q:%.4e\n", qi,
// exp(-(STATES_z1[i][2]-STATES_z1[0][2])*eV2J/BOLTZMAN/Te),
// STATES_z1[i][3],Q_z1);
}
#if MAXLEVEL > 1
for(i=0;i<z2_len;++i)
{
#ifdef DOIPD
IPD=IPD1;
#endif
double Ei=STATES_z2[i][2]*eV2J-IPD+mu;
double qi=STATES_z2[i][3]*exp(-(STATES_z2[i][2]-STATES_z2[0][2])*eV2J/BOLTZMAN/Te);
if(Ei<0) qi=0;
Q_z2+=qi;
// if(myid==0)
// printf("Z2, qi:%.4e, exp:%.4e, gi:%.4e,Q:%.4e\n", qi,
// exp(-(STATES_z2[i][2]-STATES_z2[0][2])*eV2J/BOLTZMAN/Te),
// STATES_z2[i][3], Q_z2);
}
#endif
#if MAXLEVEL > 2
for(i=0;i<z3_len;++i)
{
#ifdef DOIPD
IPD=IPD2;
#endif
double Ei=STATES_z3[i][2]*eV2J-IPD+mu;
double qi=STATES_z3[i][3]*exp(-(STATES_z3[i][2]-STATES_z3[0][2])*eV2J/BOLTZMAN/Te);
if(Ei<0) qi=0;
Q_z3+=qi;
// if(myid==0)
// printf("Z3, qi:%.4e, exp:%.4e, gi:%.4e,Q:%.4e\n", qi,
// exp(-(STATES_z3[i][2]-STATES_z3[0][2])*eV2J/BOLTZMAN/Te),
// STATES_z3[i][3],Q_z3);
}
#endif
#if MAXLEVEL > 3
for(i=0;i<z4_len;++i)
{
#ifdef DOIPD
IPD=IPD3;
#endif
double Ei=STATES_z4[i][2]*eV2J-IPD+mu;
double qi=STATES_z4[i][3]*exp(-(STATES_z4[i][2]-STATES_z4[0][2])*eV2J/BOLTZMAN/Te);
if(Ei<0) qi=0;
Q_z4+=qi;
// if(myid==0)
// printf("Z4, qi:%.4e, exp:%.4e, gi:%.4e,Q:%.4e\n", qi,
// exp(-(STATES_z4[i][2]-STATES_z4[0][2])*eV2J/BOLTZMAN/Te),
// STATES_z4[i][3],Q_z4 );
}
#endif
/*
for(int i=0;i<z5_len;++i)
Q_z5+=STATES_z5[i][3]*exp(-(STATES_z5[i][2]-STATES_z5[0][2])*eV2J/BOLTZMAN/Te);
*/
// double tmp2,tmp3;
////////////
// Z=0->1 //
////////////
#ifdef DOIPD
// z=1.0;
// IPD=3.0*z*ECHARGE*ECHARGE/2.0/r0/4.0/pi/ECONST; //Ion-Sphere model
IPD=IPD0;
#endif
DeltaE=(STATES_z1[0][2]-0.0)*eV2J-IPD+mu;
// DeltaE=fmax(0.0,DeltaE);
p=exp(-DeltaE/BOLTZMAN/Te);
// r10=2.0/ne*tmp*Q_z1/Q_z0*p; //r10= ratio of ion-concentrations n(Z=1)/n(Z=0); g_e=2 (statistical weight of electron)
r10=Q_z1/Q_z0*p;
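/* Explanatory sketch: in the ideal nondegenerate limit,
     2/(ne*lambda_th^3) = exp(-mu/(kB*Te)),
   so the usual Saha prefactor (commented out above) is absorbed by adding
   mu to DeltaE, leaving n(Z=1)/n(Z=0) = (Q_z1/Q_z0)*exp(-DeltaE/(kB*Te)). */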
// double r01=Q_z0/Q_z1*p;
//printf("IPD0:%f\n",IPD*J2eV);
// if(isnan(r10)!=0 || isinf(r10)!=0)
// {
// char errstr[255];
// sprintf(errstr,"ERROR in Saha-Init: r10 is inf or nan: p0:%.4e,Q_z1:%.4e,Q_z0:%.4e\n",p,Q_z1,Q_z0);
// error(errstr);
// }
// if(myid==0)
// printf("ERROR in Saha-Init: r10 is inf or nan: p0:%.4e,Q_z1:%.4e,Q_z0:%.4e\n",p,Q_z1,Q_z0);
////////////
// Z=1->2 //
////////////
#if MAXLEVEL > 1
#ifdef DOIPD
// z=2.0;
// IPD=3.0*z*ECHARGE*ECHARGE/2.0/r0/4.0/pi/ECONST; //Ion-Sphere model
IPD=IPD1;
#endif
DeltaE=(STATES_z2[0][2]-STATES_z1[0][2])*eV2J-IPD+mu;
// DeltaE=fmax(0.0,DeltaE);
p=exp(-DeltaE/BOLTZMAN/Te);
// p=exp(DeltaE/BOLTZMAN/Te);
// r21=2.0/ne*tmp*Q_z2/Q_z1*p;
r21=Q_z2/Q_z1*p;
// double r12=Q_z1/Q_z2*p;
// if(isnan(r21)!=0 || isinf(r21)!=0)
// {
// char errstr[255];
// sprintf(errstr,"ERROR in Saha-Init: r21 is inf or nan: p2:%.4e,Q_z2:%.4e,Q_z1:%.4e\n",p,Q_z2,Q_z1);
// error(errstr);
// }
#endif
// ////////////
// // Z=2->3 //
// ////////////
#if MAXLEVEL > 2
#ifdef DOIPD
// z=3.0;
// IPD=3.0*z*ECHARGE*ECHARGE/2.0/r0/4.0/pi/ECONST; //Ion-Sphere model
IPD=IPD2;
#endif
DeltaE=(STATES_z3[0][2]-STATES_z2[0][2])*eV2J-IPD+mu;
// DeltaE=fmax(0.0,DeltaE);
p=exp(-DeltaE/BOLTZMAN/Te);
// p=exp(DeltaE/BOLTZMAN/Te);
// r32=2.0/ne*tmp*Q_z3/Q_z2*p;
r32=Q_z3/Q_z2*p;
// double r23=Q_z2/Q_z3*p;
if(isnan(r32)!=0 || isinf(r32)!=0)
{
char errstr[255];
sprintf(errstr,"ERROR in Saha-Init: r32 is inf or nan: p3:%.4e,Q_z3:%.4e,Q_z2:%.4e\n",p,Q_z3,Q_z2);
error(errstr);
}
#endif
// ////////////
// // Z=3->4 //
// ////////////
#if MAXLEVEL > 3
#ifdef DOIPD
// z=4.0;
// IPD=3.0*z*ECHARGE*ECHARGE/2.0/r0/4.0/pi/ECONST; //Ion-Sphere model
IPD=IPD3;
#endif
DeltaE=(STATES_z4[0][2]-STATES_z3[0][2])*eV2J-IPD+mu;
// DeltaE=fmax(0.0,DeltaE);
p=exp(-DeltaE/BOLTZMAN/Te);
// p=exp(DeltaE/BOLTZMAN/Te);
// r43=2.0/ne*tmp*Q_z4/Q_z3*p;
r43=Q_z4/Q_z3*p;
// double r34=Q_z3/Q_z4*p;
// printf("r34:%.4e,p:%.4e,DeltaE/kT:%.4e\n",r34,p,DeltaE/BOLTZMAN/Te);
// if(isnan(r43)!=0 || isinf(r43)!=0)
// {
// char errstr[255];
// sprintf(errstr,"ERROR in Saha-Init: r43 is inf or nan: p4:%.4e,Q_z4:%.4e,Q_z3:%.4e\n",p,Q_z4,Q_z3);
// error(errstr);
// }
#endif
//concentrations from ratios and totalc
n0=totalc/(r43*r32*r21*r10+r32*r21*r10+r21*r10+r10+1.0); // ?
//n0=totalc*Zav/(4*r43*r32*r21*r10 + 3*r32*r21*r10+ 2*r21*r10+1.0);
n1=r10*n0;
//i.e. neutrals completely pressure-ionized
if(Q_z0==0)
{
n0=0.0;
// n1=totalc*Zav/(4*r43*r32*r21 + 3*r32*r21+ 2*r21+1.0);
n1=totalc/(r43*r32*r21 + r32*r21+ r21+1.0);
}
n2=r21*n1;
n3=r32*n2;
n4=r43*n3;
n5=r54*n4;
// Zav=(1*n1+4*n2+9*n3+16*n4+25*n5)/(1*n1+2*n2+3*n3+4*n4+5*n5);
// n4=Zav*totalc/(r12*r23*r34+2*r23*r34+3*r34+4);
// n3=r34*n4;
// n2=r23*n3;
// n1=r12*n2;
// n0=r01*n1;
// if(myid==0)
// {
// printf("n0:%.4e,n1:%.4e,n2:%.4e,n3:%.4e,n4:%.4e\n",n0,n1,n2,n3,n4);
// printf("r10:%.4e,r21:%.4e,r32:%.4e,r43:%.4e\n",r10,r21,r32,r43);
// printf("Qz4:%.4e,Qz3:%.4e,Qz2:%.4e,Qz1:%.4e,Qz0:%.4e\n",Q_z4, Q_z3,Q_z2,Q_z1,Q_z0);
// }
/*
printf("********************************************************************************************************************** \n");
printf(" Initial distribution of concentration according to generalized Saha-Equation\n");
printf(" Ti=Te=%f\n",Te);
printf(" totalc=%.4e\n",totalc);
printf(" Zmean=%.4e,ne:%.6e\n",ne/totalc,ne);
printf(" [Al0]:%.2e,[Al1]:%.2e,[Al2]:%.2e,[Al3]:%.2e,[Al4]:%.2e,[Al5]:%.2e\n",n0,n1,n2,n3,n4,n5);
printf(" I0=%.4e\n",I0);
printf(" t_FWHM=%.4e\n",tFWHM);
printf(" Lambda=%.4e\n",lambda);
printf(" NEQ:%d\n",neq);
printf("********************************************************************************************************************** \n");
*/
// ************************
// * FILL Z0 STATES
// ************************
int ishift=3;
//Now fill states
for(i=0;i<z0_len;++i)
{
DeltaE=(STATES_z0[i][2]-0)*eV2J; // relative to the ground state
double Ei=0.0;
double prob=exp(-DeltaE/BOLTZMAN/Te);
#ifdef DOIPD
IPD=IPD0;
Ei=STATES_z0[i][2]*eV2J-IPD+mu;
if(Ei<0) prob=0.0;
#endif
if(Q_z0>0)
{
Ith(y,i+ishift)=n0/Q_z0*STATES_z0[i][3]*prob;
if(Ith(y,i+ishift) < MINCONC)
Ith(y,i+ishift)=0.0;
// printf("ITH(%d):%.4e,prob:%.4e,Q_z0:%.4e,dE:%.4e\n",
// i+ishift,Ith(y,i+ishift), prob, Q_z0, DeltaE);
}
if(isnan(Ith(y,i+ishift))!=0 || isinf(Ith(y,i+ishift))!=0)
{
char errstr[255];
sprintf(errstr,"ERROR in do_Saha z0: y is inf or nan! prob:%.4e, Ei:%.4e,DeltaE:%.4e\n",
prob,Ei,DeltaE);
error(errstr);
}
}
// ************************
// * FILL Z1 STATES
// ************************
for(i=0;i<z1_len;++i)
{
// DeltaE=(STATES_z1[i][2]-STATES_z1[0][2])*eV2J-IPD0+EF;
// DeltaE=(STATES_z1[i][2]-STATES_z1[0][2])*eV2J-IPD1+EF;
DeltaE=(STATES_z1[i][2]-STATES_z1[0][2])*eV2J;
double Ei=0.0;
double prob=exp(-DeltaE/BOLTZMAN/Te);
#ifdef DOIPD
IPD=IPD1;
// Ei=(STATES_z1[i][2]-STATES_z1[0][2])*eV2J-IPD0+EF;
// Ei=(STATES_z1[i][2]-STATES_z1[0][2])*eV2J-IPD1+EF;
Ei=(STATES_z1[i][2])*eV2J-IPD0+mu;
if(Ei<0) prob=0.0;
#endif
if(Q_z1>0)
Ith(y,i+ishift+z0_len)=n1/Q_z1*STATES_z1[i][3]*prob;
if(Ith(y,i+ishift+z0_len) < MINCONC)
Ith(y,i+ishift+z0_len)=0.0;
if(isnan(Ith(y,i+ishift+z0_len))!=0 || isinf(Ith(y,i+ishift+z0_len))!=0)
{
char errstr[255];
sprintf(errstr,"ERROR in do_Saha z1: y is inf or nan! prob:%.4e, Ei:%.4e,DeltaE:%.4e\n",
prob,Ei,DeltaE);
error(errstr);
}
}
// ************************
// * FILL Z2 STATES
// ************************
#if MAXLEVEL > 1
for(i=0;i<z2_len;++i)
{
DeltaE=(STATES_z2[i][2]-STATES_z2[0][2])*eV2J;//-IPD2+EF;
double Ei=0.0;
double prob=exp(-DeltaE/BOLTZMAN/Te);
#ifdef DOIPD
IPD=IPD2;
Ei=STATES_z2[i][2]*eV2J-IPD1+mu;
if(Ei<0) prob=0.0;
#endif
if(Q_z2>0)
Ith(y,i+ishift+z0_len+z1_len)=n2/Q_z2*STATES_z2[i][3]*prob;
if(Ith(y,i+ishift+z0_len+z1_len) < MINCONC)
Ith(y,i+ishift+z0_len+z1_len)=0.0;
if(isnan(Ith(y,i+ishift+z0_len+z1_len))!=0 || isinf(Ith(y,i+ishift+z0_len+z1_len))!=0)
{
char errstr[255];
sprintf(errstr,"ERROR in do_Saha z2: y is inf or nan! prob:%.4e, Ei:%.4e,DeltaE:%.4e\n",
prob,Ei,DeltaE);
error(errstr);
}
}
#endif
// ************************
// * FILL Z3 STATES
// ************************
#if MAXLEVEL > 2
for(i=0;i<z3_len;++i)
{
DeltaE=(STATES_z3[i][2]-STATES_z3[0][2])*eV2J;//-IPD3+EF;
double Ei=0.0;
double prob=exp(-DeltaE/BOLTZMAN/Te);
#ifdef DOIPD
IPD=IPD3;
Ei=STATES_z3[i][2]*eV2J-IPD2+mu;
if(Ei<0) prob=0.0;
#endif
if(Q_z3>0)
Ith(y,i+ishift+z0_len+z1_len+z2_len)=n3/Q_z3*STATES_z3[i][3]*prob;
if(Ith(y,i+ishift+z0_len+z1_len+z2_len) < MINCONC)
Ith(y,i+ishift+z0_len+z1_len+z2_len)=0.0;
if(isnan(Ith(y,i+ishift+z0_len+z1_len+z2_len))!=0 || isinf(Ith(y,i+ishift+z0_len+z1_len+z2_len))!=0)
{
char errstr[255];
sprintf(errstr,"ERROR in do_Saha z3: y is inf or nan! prob:%.4e, Ei:%.4e,DeltaE:%4.e\n",
prob,Ei,DeltaE);
error(errstr);
}
}
#endif
// ************************
// * FILL Z4 STATES
// ************************
#if MAXLEVEL > 3
for(i=0;i<z4_len;++i)
{
DeltaE=(STATES_z4[i][2]-STATES_z4[0][2])*eV2J;//-IPD4+EF;
double prob=exp(-DeltaE/BOLTZMAN/Te);
double Ei=0.0;
#ifdef DOIPD
IPD=IPD4;
Ei=STATES_z4[i][2]*eV2J-IPD3+mu;
if(Ei<0) prob=0.0;
#endif
if(Q_z4>0)
Ith(y,i+ishift+z0_len+z1_len+z2_len+z3_len)=n4/Q_z4*STATES_z4[i][3]*prob;
if(Ith(y,i+ishift+z0_len+z1_len+z2_len+z3_len) < MINCONC)
Ith(y,i+ishift+z0_len+z1_len+z2_len+z3_len)=0.0;
if(isnan(Ith(y,i+ishift+z0_len+z1_len+z2_len+z3_len))!=0 || isinf(Ith(y,i+ishift+z0_len+z1_len+z2_len+z3_len))!=0)
{
char errstr[255];
sprintf(errstr,"ERROR in do_Saha z4: y is inf or nan! prob:%.4e, Ei:%.4e,DeltaE:%4.e\n",
prob,Ei,DeltaE);
error(errstr);
}
}
#endif
double totalc_check=0.0;
double n0_check=0.0;
double n1_check=0.0;
double n2_check=0.0;
double n3_check=0.0;
double n4_check=0.0;
for(i=3;i<neq;i++)
{
totalc_check+=Ith(y,i);
if(i-3 < z0_len)
n0_check+=Ith(y,i);
if(i-3 >= z0_len && i-3-z0_len < z1_len)
n1_check+=Ith(y,i);
if(i-3 >= z0_len+z1_len && i-3-z0_len-z1_len < z2_len)
n2_check+=Ith(y,i);
if(i-3 >= z0_len+z1_len+z2_len && i-3-z0_len-z1_len-z2_len < z3_len)
n3_check+=Ith(y,i);
if(i-3 >= z0_len+z1_len+z2_len + z3_len && i-3-z0_len-z1_len-z2_len - z3_len < z4_len)
n4_check+=Ith(y,i);
// if(myid==0) printf("y[%d]:%.4e\n",i,Ith(y,i));
}
/*
if(ABS(totalc-totalc_check) > 0.01*totalc || ABS(n0_check-n0) > 0.01* n0 ||
ABS(n1_check-n1) > 0.01* n1 || ABS(n2_check-n2) > 0.01* n2 || ABS(n3_check-n3) > 0.01* n3 ||
ABS(n4_check-n4) > 0.01* n4 )
if(myid==0)
{
char errstr[400];
sprintf(errstr,"Inconsistency in do_Saha: totalc_check= %.4e, and totalc=%.4e,sum_n:%.4e,"
"n0:%.4e, n1:%.4e,n2:%.4e,n3:%.4e, n4:%.4e\n"
"n0_check:%.4e, n1_check:%.4e,n2_check:%.4e,n3_check:%.4e, n4_check:%.4e\n",
totalc_check,totalc,n1+n2+n3+n4, n0, n1, n2, n3, n4,n0_check,n1_check,n2_check,n3_check,n4_check);
error(errstr);
}
*/
}
// ************************************************************************************************************
// ACTION
// ///////////////////////////////////////////////////////////////////////////////////////////////////////////
int colrad_ydot(double t, N_Vector y, N_Vector colrad_ydot, void *user_data)
{
/*
double t0=1e-12;
double I0=1e17;
double tFWHM=100e-15;
double sigmat=tFWHM/2.3548;
double sigmatsq=sigmat*sigmat;
*/
//It=I0*exp(-pow((t-t0),2.0)/2.0/sigmatsq);
colrad_UserData data;
data = (colrad_UserData) user_data;
double It=data->It;
It=0; //It is the LOCAL intensity!
bool initial_equi=data->initial_equi; //if true, the temperature is not varied.
double Eexc;
P_E_EE=0.0;
P_E_EI=0.0;
P_E_MPI2=0.0;
P_E_MPI3=0.0;
P_E_RAD_RECOMB=0.0;
double DeltaE;
double kfwd,krev;
double kfwd2; // for 3-photon absorption
int i,j;
int ishift=3; // AND NOT =4 (colrad_ydot starts counting at 0!)
int shift,shift1,shift2;
double ne=Ith(y,2);
double Te=Ith(y,0);
double Ti=Ith(y,1);
//FOR REABSORPTION
double Ajk;
double sigma_PI,abs_coeff,thickness,tau,wstark_freq,lambda0,wstark_len,escape_factor,groundstate_ioniz;
thickness=1e-3;
double sigma_tmp=64.0*pow(pi,4.0)*pow(ECHARGE,10.0)*EMASS/3.0/sqrt(3.0)/pow(4.0*pi*ECONST,5.0)/pow(planck,6.0)/LIGHTSPEED/pow(LASERFREQ,3.0)/pow(13.6*eV2J,2.0);
//Pre-zero <--- VERY IMPORTANT!!!!
//and count totalc for ipd and stark effect
totalc=0.0;
// #ifdef OMP
// #pragma omp parallel for reduction(+: totalc)
// #endif
for(i=0;i<neq;i++)
{
Ith(colrad_ydot,i)=0.0;
if(i >= 3) totalc+=Ith(y,i);
if(isnan(Ith(y,i))!=0)
{
printf("myid:%d, WARNING Ith(y,%d) became NaN! z0len:%d,z1len:%d,z2len:%d\n",myid,i,z0_len,z1_len,z2_len);
return 1;
}
}
data->ni=totalc;
if(totalc<0)
return 1; // <-- RHS fail
// IPD STUFF
double IPD0,IPD1,IPD2,IPD3,IPD4;
IPD0=IPD1=IPD2=IPD3=0.0;
double EF=fermi_E(ne);
data->EF=EF;
#ifdef DOIPD
double r0,debye;
r0=pow(3.0/4.0/pi/totalc,1.0/3.0);
debye=sqrt(BOLTZMAN*Te/4.0/pi/pow(totalc+ne,2.0));
// Atoms, solids, and plasmas in super-intense laser fields, p. 220
IPD0=1.0*3.0/2.0/r0*ECHARGE*ECHARGE*(pow(1.0+pow(debye/r0,3.0),2.0/3.0)-pow(debye/r0,2.0))/4.0/pi/ECONST;
IPD1=2.0*3.0/2.0/r0*ECHARGE*ECHARGE*(pow(1.0+pow(debye/r0,3.0),2.0/3.0)-pow(debye/r0,2.0))/4.0/pi/ECONST;
IPD2=3.0*3.0/2.0/r0*ECHARGE*ECHARGE*(pow(1.0+pow(debye/r0,3.0),2.0/3.0)-pow(debye/r0,2.0))/4.0/pi/ECONST;
IPD3=4.0*3.0/2.0/r0*ECHARGE*ECHARGE*(pow(1.0+pow(debye/r0,3.0),2.0/3.0)-pow(debye/r0,2.0))/4.0/pi/ECONST;
IPD4=5.0*3.0/2.0/r0*ECHARGE*ECHARGE*(pow(1.0+pow(debye/r0,3.0),2.0/3.0)-pow(debye/r0,2.0))/4.0/pi/ECONST;
data->IPD0=IPD0;
data->IPD1=IPD1;
data->IPD2=IPD2;
data->IPD3=IPD3;
data->IPD4=IPD4;
#endif
int retval=colrad_GetCoeffs(y,It,data);
if(retval !=0 )
return 1; //i.e. failure of the RHS
//printf("IPD0:%.4e,IPD1:%.4e,IPD2:%.4e\n", IPD0*J2eV,IPD1*J2eV,IPD2*J2eV);
//**********************************************
//Z=0, Exc./De-exc. + SPONTANEOUS EMISSION
//**********************************************
#ifdef OMP //OMP DOES NOT WORK THIS WAY !!!
//#pragma omp parallel for schedule(static) collapse(2) private(DeltaE,kfwd,krev,lambda0,Ajk,escape_factor,Eexc) num_threads(num_threads)
#pragma omp for nowait schedule(static) private(j,DeltaE,kfwd,krev,lambda0,Ajk,escape_factor,Eexc)
#endif
for(i=0;i<z0_len;++i)
{
for(j=0;j<z0_len;++j)
{
if(j<=i) continue; // VERY IMPORTANT
double engi=STATES_z0[i][2]*eV2J-IPD0+EF;
if(engi < 0) continue; //depressed state is continuum
DeltaE=(STATES_z0[j][2]-STATES_z0[i][2])*eV2J;
kfwd=k_EE_z0_z0[i][j]*Ith(y,i+ishift)*ne;
krev=k_EE_z0_z0_b[i][j]*Ith(y,j+ishift)*ne;
//excitation reduces conc. of state i and increases conc. of state j
Ith(colrad_ydot,i+ishift) -=kfwd;
Ith(colrad_ydot,j+ishift) +=kfwd;
//de-excitation increases conc. of state i & decreases conc. of state j
Ith(colrad_ydot,i+ishift)+=krev;
Ith(colrad_ydot,j+ishift)-=krev;
Eexc= (-kfwd+krev)*DeltaE;
P_E_EE+=Eexc;
#ifdef SPONT
// ********SPONT EMISSION ********* //
//Spont. emission: delta-n != 0 and delta-l = +-1 (only optically allowed transitions)
if((STATES_z0[j][5]-STATES_z0[i][5])>0 && STATES_z0[j][4]-STATES_z0[i][4]==1)
{
Ajk=0.0;
escape_factor=1.0;
lambda0=planck*LIGHTSPEED/DeltaE;
Ajk=EinsteinCoeff(STATES_z0[i][5],STATES_z0[j][5],STATES_z0[j][3],DeltaE);
krev=Ith(y,ishift+j)*Ajk; //neues krev
Ith(colrad_ydot,ishift+j) -= krev;
Ith(colrad_ydot,ishift+i) += krev;
//P_A_SE+=krev*DeltaE; ??
}
#endif
}
}
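/* In rate-equation form, the loop above integrates, for each pair i<j,
     dn_i/dt += -kfwd + krev,   dn_j/dt += +kfwd - krev,
   with kfwd = k_ij*n_i*ne and krev = k_ji*n_j*ne, and accumulates the
   electron energy exchange P_E_EE += (krev - kfwd)*DeltaE. */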
// *************************************
//Z=1, Exc./De-exc. & SPONTANEOUS EMISSION
// ***************************************
shift2=z0_len;
#ifdef OMP
//#pragma omp parallel for schedule(dynamic,1) collapse(2) private(DeltaE,kfwd,krev,lambda0,Ajk,escape_factor,Eexc) num_threads(num_threads)
#pragma omp for nowait schedule(static) private(j,DeltaE,kfwd,krev,lambda0,Ajk,escape_factor,Eexc)
#endif
for(i=0;i<z1_len;++i)
{
for(j=0;j<z1_len;++j)
{
if(j<=i) continue; // VERY IMPORTANT
double engi=STATES_z1[i][2]*eV2J-IPD0+EF;
if(engi < 0) continue; //depressed state is continuum
DeltaE=(STATES_z1[j][2]-STATES_z1[i][2])*eV2J;
kfwd=k_EE_z1_z1[i][j]*Ith(y,i+ishift+shift2)*ne;
krev=k_EE_z1_z1_b[i][j]*Ith(y,j+ishift+shift2)*ne;
//excitation reduces conc. of state i and increases conc. of state j
Ith(colrad_ydot,i+ishift+shift2) -=kfwd;
Ith(colrad_ydot,j+ishift+shift2) +=kfwd;
//de-excitation increases conc. of state i & decreases conc. of state j
Ith(colrad_ydot,i+ishift+shift2)+=krev;
Ith(colrad_ydot,j+ishift+shift2)-=krev;
// if(myid==1)
// printf("Z=1:kfwd:%.4e,krev:%.4e, yi:%.4e,yj:%.4e\n", kfwd,krev, Ith(y,i+ishift+shift2), Ith(y,j+ishift+shift2));
Eexc= (-kfwd+krev)*DeltaE;
P_E_EE+=Eexc;
#ifdef SPONT
// ********SPONT EMISSION ********* //
//Spont. emission: delta-n != 0 and delta-l = +-1 (only optically allowed transitions)
if((STATES_z1[j][5]-STATES_z1[i][5])>0 && STATES_z1[j][4]-STATES_z1[i][4]==1)
{
escape_factor=1.0;
lambda0=planck*LIGHTSPEED/DeltaE;
Ajk=EinsteinCoeff(STATES_z1[i][5],STATES_z1[j][5],STATES_z1[j][3],DeltaE);
krev=Ith(y,ishift+shift2+j)*Ajk;
Ith(colrad_ydot,ishift+shift2+j) -= krev;
Ith(colrad_ydot,ishift+shift2+i) += krev;
//P_A_SE+=krev*DeltaE; ??
}
#endif
}
}
// // *************************************
// //Z=2, Exc./De-exc. & SPONTANEOUS EMISSION
// // ***************************************
#if MAXLEVEL > 1
shift2=z0_len+z1_len;
#ifdef OMP
//#pragma omp parallel for schedule(dynamic,1) collapse(2) private(DeltaE,kfwd,krev,lambda0,Ajk,escape_factor,Eexc) num_threads(num_threads)
#pragma omp for nowait schedule(static) private(j,DeltaE,kfwd,krev,lambda0,Ajk,escape_factor,Eexc)
#endif
for(i=0;i<z2_len;++i)
{
for(j=0;j<z2_len;++j)
{
if(j<=i) continue; // VERY IMPORTANT
double engi=STATES_z2[i][2]*eV2J-IPD1+EF;
if(engi < 0) continue; //depressed state is continuum
DeltaE=(STATES_z2[j][2]-STATES_z2[i][2])*eV2J;
kfwd=k_EE_z2_z2[i][j]*Ith(y,i+ishift+shift2)*ne;
krev=k_EE_z2_z2_b[i][j]*Ith(y,j+ishift+shift2)*ne;
//excitation reduces conc. of state i and increases conc. of state j
Ith(colrad_ydot,i+ishift+shift2) -=kfwd;
Ith(colrad_ydot,j+ishift+shift2) +=kfwd;
//de-excitation increases conc. of state i & decreases conc. of state j
Ith(colrad_ydot,i+ishift+shift2)+=krev;
Ith(colrad_ydot,j+ishift+shift2)-=krev;
// if(myid==1)
// printf("Z=2:kfwd:%.4e,krev:%.4e, yi:%.4e,yj:%.4e\n", kfwd,krev, Ith(y,i+ishift+shift2), Ith(y,j+ishift+shift2));
// printf("kfwd:%.5e, krev:%.5e,i:%d,j:%d,keefwd:%.4e,keeb:%.4e,T:%.4e\n",kfwd,krev,i,j,k_EE_z2_z2[i][j], k_EE_z2_z2_b[i][j],Te);
Eexc= (-kfwd+krev)*DeltaE;
P_E_EE+=Eexc;
#ifdef SPONT
// ********SPONT EMISSION ********* //
//Spont. emission: delta-n != 0 and delta-l = +-1 (only optically allowed transitions)
if((STATES_z2[j][5]-STATES_z2[i][5])>0 && STATES_z2[j][4]-STATES_z2[i][4]==1)
{
escape_factor=1.0;
lambda0=planck*LIGHTSPEED/DeltaE;
Ajk=EinsteinCoeff(STATES_z2[i][5],STATES_z2[j][5],STATES_z2[j][3],DeltaE);
krev=Ith(y,ishift+shift2+j)*Ajk;
Ith(colrad_ydot,ishift+shift2+j) -= krev;
Ith(colrad_ydot,ishift+shift2+i) += krev;
//P_A_SE+=krev*DeltaE; ??
}
#endif
}
}
#endif
// *************************************
//Z=3, Exc./De-exc. & SPONTANEOUS EMISSION
// ***************************************
#if MAXLEVEL > 2
shift2=z0_len+z1_len+z2_len;
#ifdef OMP
// #pragma omp parallel for schedule(dynamic,1) collapse(2) private(DeltaE,kfwd,krev,lambda0,Ajk,escape_factor,Eexc) num_threads(num_threads)
#pragma omp for nowait schedule(static) private(j,DeltaE,kfwd,krev,lambda0,Ajk,escape_factor,Eexc)
#endif
for(i=0;i<z3_len;++i)
{
for(j=0;j<z3_len;++j)
{
if(j<=i) continue; // VERY IMPORTANT
double engi=STATES_z3[i][2]*eV2J-IPD2+EF;
if(engi < 0) continue; //depressed state is continuum
DeltaE=(STATES_z3[j][2]-STATES_z3[i][2])*eV2J;
kfwd=k_EE_z3_z3[i][j]*Ith(y,i+ishift+shift2)*ne;
krev=k_EE_z3_z3_b[i][j]*Ith(y,j+ishift+shift2)*ne;
// if(myid==1)
// printf("Z=3 kfwd:%.4e,krev:%.4e, yi:%.4e,yj:%.4e\n", kfwd,krev, Ith(y,i+ishift+shift2), Ith(y,j+ishift+shift2));
//excitation reduces conc. of state i and increases conc. of state j
Ith(colrad_ydot,i+ishift+shift2) -=kfwd;
Ith(colrad_ydot,j+ishift+shift2) +=kfwd;
//de-excitation increases conc. of state i & decreases conc. of state j
Ith(colrad_ydot,i+ishift+shift2)+=krev;
Ith(colrad_ydot,j+ishift+shift2)-=krev;
Eexc= (-kfwd+krev)*DeltaE;
P_E_EE+=Eexc;
// if(Ith(y,i+ishift+shift2) >0 || Ith(y,j+ishift+shift2)>0)
// printf("myid:%d, i:%d,j:%d,kEE3:%.4e, kEE3b:%.4e,kfwd:%.4e,krev:%.4e,ni:%.4e,nj:%.4e,ne:%.4e\n",myid,i,j,
// k_EE_z3_z3[i][j],k_EE_z3_z3_b[i][j],kfwd,krev,
// Ith(y,i+ishift+shift2),Ith(y,j+ishift+shift2),ne );
#ifdef SPONT
// ********SPONT EMISSION ********* //
//Spont. emission: delta-n > 0 and delta-l = +-1 (only optically allowed transitions)
if((STATES_z3[j][5]-STATES_z3[i][5])>0 && STATES_z3[j][4]-STATES_z3[i][4]==1)
{
escape_factor=1.0;
lambda0=planck*LIGHTSPEED/DeltaE;
Ajk=EinsteinCoeff(STATES_z3[i][5],STATES_z3[j][5],STATES_z3[j][3],DeltaE);
krev=Ith(y,ishift+shift2+j)*Ajk;
Ith(colrad_ydot,ishift+shift2+j) -= krev;
Ith(colrad_ydot,ishift+shift2+i) += krev;
//P_A_SE+=krev*DeltaE; ??
}
#endif
}
}
#endif //MAXLEVEL > 2
// *************************************
//Z=4, Exc./De-Exc. & SPONTANEOUS EMISSION
// ***************************************
#if MAXLEVEL > 3
shift2=z0_len+z1_len+z2_len+z3_len;
#ifdef OMP
// #pragma omp parallel for schedule(dynamic,1) collapse(2) private(DeltaE,kfwd,krev,lambda0,Ajk,escape_factor,Eexc) num_threads(num_threads)
#pragma omp for nowait schedule(static) private(j,DeltaE,kfwd,krev,lambda0,Ajk,escape_factor,Eexc)
#endif
for(i=0;i<z4_len;++i)
{
for(j=0;j<z4_len;++j)
{
if(j<=i) continue; // VERY IMPORTANT
DeltaE=(STATES_z4[j][2]-STATES_z4[i][2])*eV2J-IPD3+EF;
kfwd=k_EE_z4_z4[i][j]*Ith(y,i+ishift+shift2)*ne;
krev=k_EE_z4_z4_b[i][j]*Ith(y,j+ishift+shift2)*ne;
//excitation reduces conc. of i state and increases conc. of j state
Ith(colrad_ydot,i+ishift+shift2) -=kfwd;
Ith(colrad_ydot,j+ishift+shift2) +=kfwd;
//de-excitation increases conc. of i state & decr. conc. of j state
Ith(colrad_ydot,i+ishift+shift2)+=krev;
Ith(colrad_ydot,j+ishift+shift2)-=krev;
// if(myid==1)
// printf("Z=4:kfwd:%.4e,krev:%.4e, yi:%.4e,yj:%.4e\n", kfwd,krev, Ith(y,i+ishift+shift2), Ith(y,j+ishift+shift2));
Eexc= (-kfwd+krev)*DeltaE;
P_E_EE+=Eexc;
#ifdef SPONT
// ********SPONT EMISSION ********* //
//Spont. emission: delta-n > 0 and delta-l = +-1 (only optically allowed transitions)
if((STATES_z4[j][5]-STATES_z4[i][5])>0 && STATES_z4[j][4]-STATES_z4[i][4]==1)
{
escape_factor=1.0;
lambda0=planck*LIGHTSPEED/DeltaE;
Ajk=EinsteinCoeff(STATES_z4[i][5],STATES_z4[j][5],STATES_z4[j][3],DeltaE);
krev=Ith(y,ishift+shift2+j)*Ajk;
Ith(colrad_ydot,ishift+shift2+j) -= krev;
Ith(colrad_ydot,ishift+shift2+i) += krev;
//P_A_SE+=krev*DeltaE; ??
}
#endif
}
}
#endif // MAXLEVEL > 3
// ********************************************************
// NOW IONIZATION RATES
// ********************************************************
// ***************************
//Z=0->Z=1, Ioniz./Recomb.
// ***************************
shift1=ishift;
shift2=ishift+z0_len;
#ifdef OMP
// #pragma omp parallel for schedule(dynamic,1) collapse(2) private(DeltaE,kfwd,kfwd2,krev,\
escape_factor,groundstate_ioniz,sigma_PI,abs_coeff,tau,wstark_freq,lambda0,Eexc) num_threads(num_threads)
#pragma omp for nowait schedule(static) private(j,DeltaE,kfwd,kfwd2,krev,\
escape_factor,groundstate_ioniz,sigma_PI,abs_coeff,tau,wstark_freq,lambda0,Eexc)
#endif
for(i=0;i<z0_len;++i)
{
for(j=0;j<z1_len;++j)
{
DeltaE=(STATES_z1[j][2]-STATES_z0[i][2])*eV2J-IPD0+EF;
#ifdef DOIPD
if(DeltaE<0) continue;
DeltaE=MAX(0.0,DeltaE);
#endif
//COLL IONIZ
Ith(colrad_ydot,i+shift1) -= k_EI_z0_z1[i][j]*Ith(y,i+shift1)*ne;
Ith(colrad_ydot,2) += k_EI_z0_z1[i][j]*Ith(y,i+shift1)*ne; //Ne inc.
Ith(colrad_ydot,j+shift2) += k_EI_z0_z1[i][j]*Ith(y,i+shift1)*ne;
//3-body recomb
Ith(colrad_ydot,j+shift2) -= k_EI_z1_z0[i][j]*Ith(y,j+shift2)*ne*ne;
Ith(colrad_ydot,i+shift1) += k_EI_z1_z0[i][j]*Ith(y,j+shift2)*ne*ne;
Ith(colrad_ydot,2) -= k_EI_z1_z0[i][j]*Ith(y,j+shift2)*ne*ne;
Eexc = -k_EI_z0_z1[i][j]*Ith(y,i+shift1)*ne*DeltaE;
Eexc += k_EI_z1_z0[i][j]*Ith(y,j+shift2)*ne*ne*DeltaE;
P_E_EI += Eexc;
//////////
// MPI
//////////
escape_factor=1.0;
#ifdef MULTIPHOTON
#ifdef STARK
if(k_MPI_z1_z0[i][j][0]>0 && STATES_z1[j][5]>STATES_z0[i][5])
{
//moreover, the quasi-static approximation only works with nu-nl>0
groundstate_ioniz=(STATES_z1[0][2]-STATES_z0[0][2])*eV2J;
sigma_PI=sigma_tmp*pow(DeltaE,2.5)/sqrt(groundstate_ioniz);
abs_coeff=sigma_PI*ne; //TODO: correct the cross-section with the Pauli blocking factor!
tau=abs_coeff*thickness;
wstark_freq=StarkWidth(STATES_z1[j][5],STATES_z0[i][5],Te/11605,Ti/11605,Ti/11605,ne,totalc);
lambda0=planck*LIGHTSPEED/DeltaE;
wstark_len=wstark_freq*lambda0*lambda0/LIGHTSPEED;
escape_factor=EscapeFactor(wstark_len*1e10,tau);
}
#endif
//NOTE: open question whether MPI should care about potential lowering at all...?
if(DeltaE >0 ) //DeltaE already has the IPD subtracted
{
kfwd= k_MPI_z0_z1[i][j][0]*Ith(y,i+shift1); // unit: k_MPI = 1/s
kfwd2=k_MPI_z0_z1[i][j][1]*Ith(y,i+shift1); // *wup; // 2-photon and 3-photon ionization!
krev=k_MPI_z1_z0[i][j][0]*Ith(y,j+shift2)*ne;//*escape_factor; // *wlo; // unit: k_RADRECOMB = m^3/s
//kfwd2=0.0; // 3-photon-ioniz. off
Ith(colrad_ydot,shift1+i) -=(kfwd +kfwd2);
Ith(colrad_ydot,2) +=(kfwd +kfwd2); //Ne inc.
Ith(colrad_ydot,shift2+j) +=(kfwd +kfwd2);
//krev=fmin(krev,kfwd);
Ith(colrad_ydot,shift2+j) -=krev;
Ith(colrad_ydot,shift1+i) +=krev;
Ith(colrad_ydot,2)-=krev;
// for 2 photons and rad. recomb.
P_E_MPI2 += kfwd* (2.0*planck*LASERFREQ-(DeltaE)); //no heating from rad. recomb. -> the photon escapes (so far without self-absorption)
//now for 3 photons
P_E_MPI3 += kfwd2*(3.0*planck*LASERFREQ-(DeltaE));
//now rad. recomb.
P_E_RAD_RECOMB -= krev*(DeltaE)*escape_factor; //Radiative cooling, c.f. http://www.astronomy.ohio-state.edu/~dhw/A825/notes8.pdf p. 2
}
#endif //MPI
}
}
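/* Bookkeeping pattern shared by all ionization blocks (descriptive sketch, not in
 * the original): for a transition (Z,i) -> (Z+1,j) with rate coefficient k_ion,
 *   d n_i/dt -= k_ion*n_i*ne,  d n_j/dt += k_ion*n_i*ne,  d ne/dt += k_ion*n_i*ne,
 * while the reverse three-body channel scales with ne*ne and runs the other way.
 * Component 2 of the state vector y holds ne; component 0 holds Te. */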
// ***************************
// Z=1->Z=2, Ioniz./Recomb.
// ***************************
#if MAXLEVEL > 1
shift1=ishift+z0_len;
shift2=ishift+z0_len+z1_len;
#ifdef OMP
//#pragma omp parallel for schedule(dynamic,1) collapse(2) private(DeltaE,kfwd,kfwd2,krev,\
escape_factor,groundstate_ioniz,sigma_PI,abs_coeff,tau,wstark_freq,lambda0,Eexc) num_threads(num_threads)
#pragma omp for nowait schedule(static) private(j,DeltaE,kfwd,kfwd2,krev,\
escape_factor,groundstate_ioniz,sigma_PI,abs_coeff,tau,wstark_freq,lambda0,Eexc)
#endif
for(i=0;i<z1_len;++i)
{
for(j=0;j<z2_len;++j)
{
DeltaE=(STATES_z2[j][2]-STATES_z1[i][2])*eV2J-IPD1+EF;
#ifdef DOIPD
if(DeltaE<0) continue;
DeltaE=MAX(0.0,DeltaE);
#endif
//COLL IONIZ
Ith(colrad_ydot,i+shift1) -= k_EI_z1_z2[i][j]*Ith(y,i+shift1)*ne;
Ith(colrad_ydot,2) += k_EI_z1_z2[i][j]*Ith(y,i+shift1)*ne; //Ne inc.
Ith(colrad_ydot,j+shift2) += k_EI_z1_z2[i][j]*Ith(y,i+shift1)*ne;
//3-body recomb
Ith(colrad_ydot,shift2+j) -= k_EI_z2_z1[i][j]*Ith(y,j+shift2)*ne*ne;
Ith(colrad_ydot,shift1+i) += k_EI_z2_z1[i][j]*Ith(y,j+shift2)*ne*ne;
Ith(colrad_ydot,2) -= k_EI_z2_z1[i][j]*Ith(y,j+shift2)*ne*ne;
Eexc = -k_EI_z1_z2[i][j]*Ith(y,i+shift1)*ne*DeltaE;
Eexc += k_EI_z2_z1[i][j]*Ith(y,j+shift2)*ne*ne*DeltaE;
P_E_EI += Eexc;
//////////
// MPI
//////////
escape_factor=1.0;
#ifdef MULTIPHOTON
#ifdef STARK
if(k_MPI_z2_z1[i][j][0]>0 && STATES_z2[j][5]>STATES_z1[i][5])
{
//moreover, the quasi-static approximation only works with nu-nl>0
groundstate_ioniz=(STATES_z2[0][2]-STATES_z1[0][2])*eV2J;
sigma_PI=sigma_tmp*pow(DeltaE,2.5)/sqrt(groundstate_ioniz);
abs_coeff=sigma_PI*ne; //TODO: correct the cross-section with the Pauli blocking factor!
tau=abs_coeff*thickness;
wstark_freq=StarkWidth(STATES_z2[j][5],STATES_z1[i][5],Te/11605,Ti/11605,Ti/11605,ne,totalc);
lambda0=planck*LIGHTSPEED/DeltaE;
wstark_len=wstark_freq*lambda0*lambda0/LIGHTSPEED;
escape_factor=EscapeFactor(wstark_len*1e10,tau);
}
#endif
if(DeltaE >0)
{
kfwd= k_MPI_z1_z2[i][j][0]*Ith(y,i+shift1); // *wup;
kfwd2=k_MPI_z1_z2[i][j][1]*Ith(y,i+shift1); // *wup; // 2-photon and 3-photon ionization!
krev=k_MPI_z2_z1[i][j][0]*Ith(y,shift2+j)*ne; // *wlo; //actually rad. recomb., not k_MPI_rev; unit of k = m^3/s
//kfwd2=0.0; // 3-photon-ioniz. off
Ith(colrad_ydot,i+shift1) -=(kfwd +kfwd2);
Ith(colrad_ydot,2) +=(kfwd +kfwd2); //Ne inc.
Ith(colrad_ydot,shift2+j) +=(kfwd +kfwd2);
//krev=fmin(krev,kfwd);
Ith(colrad_ydot,shift2+j) -=krev;
Ith(colrad_ydot,shift1+i) +=krev;
Ith(colrad_ydot,2)-=krev;
// for 2 photons and rad. recomb.
P_E_MPI2 += kfwd* (2.0*planck*LASERFREQ-(DeltaE)); //no heating from rad. recomb. -> the photon escapes (so far without self-absorption)
//now for 3 photons
P_E_MPI3 += kfwd2*(3.0*planck*LASERFREQ-(DeltaE));
//now rad. recomb.
P_E_RAD_RECOMB -= krev*(DeltaE)*escape_factor;
}
#endif //MPI
}
}
#endif // MAXLEVEL > 1
// ***************************
// Z=2->Z=3, Ioniz./Recomb.
// ***************************
#if MAXLEVEL > 2
shift1=ishift+z0_len+z1_len;
shift2=ishift+z0_len+z1_len+z2_len;
#ifdef OMP
//#pragma omp parallel for schedule(dynamic,1) collapse(2) private(DeltaE,kfwd,kfwd2,krev,\
escape_factor,groundstate_ioniz,sigma_PI,abs_coeff,tau,wstark_freq,lambda0,Eexc) num_threads(num_threads)
#pragma omp for nowait schedule(static) private(j,DeltaE,kfwd,kfwd2,krev,\
escape_factor,groundstate_ioniz,sigma_PI,abs_coeff,tau,wstark_freq,lambda0,Eexc)
#endif
for(i=0;i<z2_len;++i)
{
for(j=0;j<z3_len;++j)
{
DeltaE=(STATES_z3[j][2]-STATES_z2[i][2])*eV2J-IPD2+EF;
#ifdef DOIPD
if(DeltaE<0) continue;
DeltaE=MAX(0.0,DeltaE);
#endif
//COLL IONIZ
Ith(colrad_ydot,i+shift1) -= k_EI_z2_z3[i][j]*Ith(y,i+shift1)*ne;
Ith(colrad_ydot,2) += k_EI_z2_z3[i][j]*Ith(y,i+shift1)*ne; //Ne inc.
Ith(colrad_ydot,j+shift2) += k_EI_z2_z3[i][j]*Ith(y,i+shift1)*ne;
//3-body recomb
Ith(colrad_ydot,shift2+j) -= k_EI_z3_z2[i][j]*Ith(y,j+shift2)*ne*ne;
Ith(colrad_ydot,shift1+i) += k_EI_z3_z2[i][j]*Ith(y,j+shift2)*ne*ne;
Ith(colrad_ydot,2) -= k_EI_z3_z2[i][j]*Ith(y,j+shift2)*ne*ne;
Eexc = -k_EI_z2_z3[i][j]*Ith(y,i+shift1)*ne*DeltaE;
Eexc += k_EI_z3_z2[i][j]*Ith(y,j+shift2)*ne*ne*DeltaE;
P_E_EI += Eexc;
//////////
// MPI
//////////
escape_factor=1.0;
#ifdef MULTIPHOTON
#ifdef STARK
if(k_MPI_z3_z2[i][j][0]>0 && STATES_z3[j][5]>STATES_z2[i][5])
{
//moreover, the quasi-static approximation only works with nu-nl>0
groundstate_ioniz=(STATES_z3[0][2]-STATES_z2[0][2])*eV2J;
sigma_PI=sigma_tmp*pow(DeltaE,2.5)/sqrt(groundstate_ioniz);
abs_coeff=sigma_PI*ne; //TODO: correct the cross-section with the Pauli blocking factor!
tau=abs_coeff*thickness;
wstark_freq=StarkWidth(STATES_z3[j][5],STATES_z2[i][5],Te/11605,Ti/11605,Ti/11605,ne,totalc);
lambda0=planck*LIGHTSPEED/DeltaE;
wstark_len=wstark_freq*lambda0*lambda0/LIGHTSPEED;
escape_factor=EscapeFactor(wstark_len*1e10,tau);
}
#endif
if(DeltaE >0)
{
kfwd= k_MPI_z2_z3[i][j][0]*Ith(y,i+shift1); // *wup;
kfwd2=k_MPI_z2_z3[i][j][1]*Ith(y,i+shift1); // *wup; // 2-photon and 3-photon ionization!
krev=k_MPI_z3_z2[i][j][0]*Ith(y,shift2+j)*ne; // *wlo; //actually rad. recomb., not k_MPI_rev; unit of k = m^3/s
//kfwd2=0.0; // 3-photon-ioniz. off
Ith(colrad_ydot,i+shift1) -=(kfwd +kfwd2);
Ith(colrad_ydot,2) +=(kfwd +kfwd2); //Ne inc.
Ith(colrad_ydot,shift2+j) +=(kfwd +kfwd2);
//krev=fmin(krev,kfwd);
Ith(colrad_ydot,shift2+j) -=krev;
Ith(colrad_ydot,shift1+i) +=krev;
Ith(colrad_ydot,2)-=krev;
// for 2 photons and rad. recomb.
P_E_MPI2 += kfwd* (2.0*planck*LASERFREQ-(DeltaE)); //no heating from rad. recomb. -> the photon escapes (so far without self-absorption)
//now for 3 photons
P_E_MPI3 += kfwd2*(3.0*planck*LASERFREQ-(DeltaE));
//now rad. recomb.
P_E_RAD_RECOMB -= krev*(DeltaE)*escape_factor;
}
#endif //MPI
}
}
#endif // MAXLEVEL > 2
// ***************************
// Z=3->Z=4, Ioniz./Recomb.
// ***************************
#if MAXLEVEL > 3
shift1=ishift+z0_len+z1_len+z2_len;
shift2=ishift+z0_len+z1_len+z2_len+z3_len;
#ifdef OMP
//#pragma omp parallel for schedule(dynamic,1) collapse(2) private(DeltaE,kfwd,kfwd2,krev,\
escape_factor,groundstate_ioniz,sigma_PI,abs_coeff,tau,wstark_freq,lambda0,Eexc) num_threads(num_threads)
#pragma omp for nowait schedule(static) private(j,DeltaE,kfwd,kfwd2,krev,\
escape_factor,groundstate_ioniz,sigma_PI,abs_coeff,tau,wstark_freq,lambda0,Eexc)
#endif
for(i=0;i<z3_len;++i)
{
for(j=0;j<z4_len;++j)
{
DeltaE=(STATES_z4[j][2]-STATES_z3[i][2])*eV2J-IPD3+EF;
#ifdef DOIPD
if(DeltaE<0) continue;
DeltaE=MAX(0.0,DeltaE);
#endif
//COLL IONIZ
Ith(colrad_ydot,i+shift1) -= k_EI_z3_z4[i][j]*Ith(y,i+shift1)*ne;
Ith(colrad_ydot,2) += k_EI_z3_z4[i][j]*Ith(y,i+shift1)*ne; //Ne inc.
Ith(colrad_ydot,j+shift2) += k_EI_z3_z4[i][j]*Ith(y,i+shift1)*ne;
//3-body recomb
Ith(colrad_ydot,shift2+j) -= k_EI_z4_z3[i][j]*Ith(y,j+shift2)*ne*ne;
Ith(colrad_ydot,shift1+i) += k_EI_z4_z3[i][j]*Ith(y,j+shift2)*ne*ne;
Ith(colrad_ydot,2) -= k_EI_z4_z3[i][j]*Ith(y,j+shift2)*ne*ne;
Eexc = -k_EI_z3_z4[i][j]*Ith(y,i+shift1)*ne*DeltaE;
Eexc += k_EI_z4_z3[i][j]*Ith(y,j+shift2)*ne*ne*DeltaE;
P_E_EI += Eexc;
//////////
// MPI
//////////
escape_factor=1.0;
#ifdef MULTIPHOTON
#ifdef STARK
if(k_MPI_z4_z3[i][j][0]>0 && STATES_z4[j][5]>STATES_z3[i][5])
{
//moreover, the quasi-static approximation only works with nu-nl>0
groundstate_ioniz=(STATES_z4[0][2]-STATES_z3[0][2])*eV2J;
sigma_PI=sigma_tmp*pow(DeltaE,2.5)/sqrt(groundstate_ioniz);
abs_coeff=sigma_PI*ne; //TODO: correct the cross-section with the Pauli blocking factor!
tau=abs_coeff*thickness;
wstark_freq=StarkWidth(STATES_z4[j][5],STATES_z3[i][5],Te/11605,Ti/11605,Ti/11605,ne,totalc);
lambda0=planck*LIGHTSPEED/DeltaE;
wstark_len=wstark_freq*lambda0*lambda0/LIGHTSPEED;
escape_factor=EscapeFactor(wstark_len*1e10,tau);
}
#endif
if(DeltaE >0)
{
kfwd= k_MPI_z3_z4[i][j][0]*Ith(y,i+shift1); // *wup;
kfwd2=k_MPI_z3_z4[i][j][1]*Ith(y,i+shift1); // *wup; // 2-photon and 3-photon ionization!
krev=k_MPI_z4_z3[i][j][0]*Ith(y,shift2+j)*ne; // *wlo; //actually rad. recomb., not k_MPI_rev; unit of k = m^3/s
//kfwd2=0.0; // 3-photon-ioniz. off
Ith(colrad_ydot,i+shift1) -=(kfwd +kfwd2);
Ith(colrad_ydot,2) +=(kfwd +kfwd2); //Ne inc.
Ith(colrad_ydot,shift2+j) +=(kfwd +kfwd2);
//krev=fmin(krev,kfwd);
Ith(colrad_ydot,shift2+j) -=krev;
Ith(colrad_ydot,shift1+i) +=krev;
Ith(colrad_ydot,2)-=krev;
// for 2 photons and rad. recomb.
P_E_MPI2 += kfwd* (2.0*planck*LASERFREQ-(DeltaE)); //no heating from rad. recomb. -> the photon escapes (so far without self-absorption)
//now for 3 photons
P_E_MPI3 += kfwd2*(3.0*planck*LASERFREQ-(DeltaE));
//now rad. recomb.
P_E_RAD_RECOMB -= krev*(DeltaE)*escape_factor;
}
#endif //MPI
}
}
#endif //MAXLEVEL > 3
// ********************** THERMO ******************************************
// double cvinv=1.0/(1.5*BOLTZMAN*ne);
double P_E_TOTAL=P_E_EI+P_E_EE+P_E_MPI2+P_E_MPI3+P_E_RAD_RECOMB;
data->P_TOTAL=P_E_TOTAL;
data->P_EE=P_E_EE;
data->P_EI=P_E_EI;
data->P_MPI2=P_E_MPI2;
data->P_MPI3=P_E_MPI3;
data->P_RR=P_E_RAD_RECOMB;
// printf("myid:%d, PEI:%.4e, PEE:%.4e,MPI2:%.4e, MPI3:%.4e, RADREC:%.4e\n",
// myid,
// P_E_EI,
// P_E_EE,
// P_E_MPI2,
// P_E_MPI3,
// P_E_RAD_RECOMB);
//DURING PRE-EQUILIBRATION T=CONST!
if(initial_equi==false)
{
//double cvinv= 1.0/EOS_cve_from_r_te(data->dens, Te);
double cvinv= 1.0/Cv(Te/11604.5, ne);
Ith(colrad_ydot,0) = cvinv*P_E_TOTAL;
}
else
{
Ith(colrad_ydot,0)=0.0;
}
// if(myid==1)
// for(i=0;i<neq;i++)
// {
// if(ABS(Ith(colrad_ydot,i)) > 1e-12)
// printf("myid:%d, t:%.4e, i:%d, y[i]:%.4e, dot[i]:%.4e\n",myid,t,i, Ith(y,i), Ith(colrad_ydot,i));
// }
return 0; // 0 means everything is OK
}
// ********************************************************************+
// * COMPUTE RATE COEFFS
// ********************************************************************+
int colrad_GetCoeffs(N_Vector y,double It,void *user_data)
{
int i,j,k;
double kronecker;
double a;
double DeltaE;
double Te,ne;
double expint;
double G2;
double I_1,I_2;
Te=Ith(y,0);
if(Te <0 || isnan(Te)!=0) return -1;
ne=Ith(y,2);
if(ne <0 || isnan(ne)!=0) return -1;
// double v_e=sqrt(8.0*BOLTZMAN*Te/pi/EMASS);
double E_ion_H=13.6*eV2J;
double alpha_i=0.05;
double alpha_e=0.05;
double beta_i=4.0;
// double four_pi_a0_sq=4.0*pi*pow(bohr_radius,2.0);
// double E_ion_H_div_kTe_sq=pow((E_ion_H/BOLTZMAN/Te),2.0);
// double two_pi_me_kT_hsq=2.0*pi*EMASS*BOLTZMAN*Te/pow(planck,2.0);
// double log54_beta_i=log(5.0/4.0*beta_i);
double kbTe=(BOLTZMAN*Te);
kbTe=Te/11604.5;
double tmp0,tmp1,tmp2;
colrad_UserData data;
data = (colrad_UserData) user_data;
double IPD0=data->IPD0*J2eV;
double IPD1=data->IPD1*J2eV;
double IPD2=data->IPD2*J2eV;
double IPD3=data->IPD3*J2eV;
double IPD4=data->IPD4*J2eV;
double EF=data->EF*J2eV;
double Tinit=data->Tinit;
bool initial_equi = data->initial_equi;
if(initial_equi)
{
if(ABS(Te-Tinit) > Tinit*0.03) // CVODE was too eager and tried extreme temperatures
return -1;
}
//MPI
// double k_RR_fact1=32*pi*pow(bohr_radius,2.0)/3.0/175700.00067;
// double k_RR_fact2=pow((E_ion_H/BOLTZMAN/Te),2.0);
// double sigma_MPI_2;//=sigma1/LASERFREQ/pow(planck*LASERFREQ,2.0); //MPI-cross-sect. (2-photon)
// double sigma_MPI_3;
// double sigma1;
//constants for computing sigma1
// double sigma_tmp=64.0*pow(pi,4.0)*pow(ECHARGE,10.0)*EMASS/3.0/sqrt(3.0)/pow(4.0*pi*ECONST,5.0)/pow(planck,6.0)/LIGHTSPEED/pow(LASERFREQ,3.0)/pow(13.6*eV2J,2.0);
// double I_sq=It*It; //for 2-photon-ioniz.
// double I_cu=I_sq*It; // for 3-photon-ioniz.
int fail=0;
// double pow_two_pi_me_kT_hsq_tmp1= pow(two_pi_me_kT_hsq,1.5); //NOTE: this is the thermal de Broglie wavelength
//For the backward rates this must somehow be
//replaced by the chemical potential, otherwise it does not fit.
//Likewise in Saha.
#ifdef MULTIPHOTON
double twophoton_energy=2.0*planck*LASERFREQ*J2eV;
double threephoton_energy=3.0*planck*LASERFREQ*J2eV;
double nu_div_hnu_sq=LASERFREQ/pow(planck*LASERFREQ,2.0);
double nu_div_nu_div_hnu_cub=LASERFREQ/LASERFREQ/pow(planck*LASERFREQ,3.0);
#endif
double mu=chempot(ne,Te);
double mu_eV=mu*J2eV;
double fermi_factor=eval_fermi_integrand(ne,Te,mu);
if(fermi_factor==-1)
return -1;
double kmax_estim=1e4;
//PREZERO RATE-COEFFS CODEBLOCK
{
//pre-zero k_EE's
for(i=0;i<z0_len;i++)
{
for(j=0;j<z0_len;j++)
{
k_EE_z0_z0[i][j]=0.0;
k_EE_z0_z0_b[i][j]=0.0;
}
}
for(i=0;i<z1_len;i++)
{
for(j=0;j<z1_len;j++)
{
k_EE_z1_z1[i][j]=0.0;
k_EE_z1_z1_b[i][j]=0.0;
}
}
#if MAXLEVEL > 1
for(i=0;i<z2_len;i++)
{
for(j=0;j<z2_len;j++)
{
k_EE_z2_z2[i][j]=0.0;
k_EE_z2_z2_b[i][j]=0.0;
}
}
#endif
#if MAXLEVEL > 2
for(i=0;i<z3_len;i++)
{
for(j=0;j<z3_len;j++)
{
k_EE_z3_z3[i][j]=0.0;
k_EE_z3_z3_b[i][j]=0.0;
}
}
#endif
#if MAXLEVEL > 3
for(i=0;i<z4_len;i++)
{
for(j=0;j<z4_len;j++)
{
k_EE_z4_z4[i][j]=0.0;
k_EE_z4_z4_b[i][j]=0.0;
}
}
#endif
//pre-zero k_MPI's
for(i=0;i<z0_len;i++)
{
for(j=0;j<z1_len;j++)
{
k_EI_z0_z1[i][j]=0.0;
k_EI_z1_z0[i][j]=0.0;
for(k=0;k<2;k++)
{
k_MPI_z0_z1[i][j][k]=0.0;
k_MPI_z1_z0[i][j][k]=0.0;
}
}
}
#if MAXLEVEL > 1
for(i=0;i<z1_len;i++)
{
for(j=0;j<z2_len;j++)
{
k_EI_z1_z2[i][j]=0.0;
k_EI_z2_z1[i][j]=0.0;
for(k=0;k<2;k++)
{
k_MPI_z1_z2[i][j][k]=0.0;
k_MPI_z2_z1[i][j][k]=0.0;
}
}
}
#endif
#if MAXLEVEL > 2
for(i=0;i<z2_len;i++)
{
for(j=0;j<z3_len;j++)
{
k_EI_z2_z3[i][j]=0.0;
k_EI_z3_z2[i][j]=0.0;
for(k=0;k<2;k++)
{
k_MPI_z2_z3[i][j][k]=0.0;
k_MPI_z3_z2[i][j][k]=0.0;
}
}
}
#endif
#if MAXLEVEL > 3
for(i=0;i<z3_len;i++)
{
for(j=0;j<z4_len;j++)
{
k_EI_z3_z4[i][j]=0.0;
k_EI_z4_z3[i][j]=0.0;
for(k=0;k<2;k++)
{
k_MPI_z3_z4[i][j][k]=0.0;
k_MPI_z4_z3[i][j][k]=0.0;
}
}
}
#endif
}
//NOTE: only to skip the initial equilibration!
// if(initial_equi==true)
// return 0;
///////////////////////////////
// Elec. excitation for Z=0
///////////////////////////////
fail=0;
#ifdef OMP
//#pragma omp parallel for schedule(dynamic,1) collapse(2) private(kronecker,DeltaE,a,expint,G2,I_1,I_2) num_threads(num_threads)
#pragma omp for simd schedule(static) private(j,kronecker,DeltaE,a,expint,G2,I_1,I_2)
// #pragma omp for simd schedule(static) collapse(2) private(kronecker,DeltaE,a,expint,G2,I_1,I_2)
#endif
for(i=0;i<z0_len;++i)
{
for(j=0;j<z0_len;++j)
{
if(j<=i) continue;
kronecker=0.0; //optically allowed transition
if(STATES_z0[i][4]==STATES_z0[j][4]) // l_j==l_i ?
kronecker=1.0;//optically forbidden transition
DeltaE=(STATES_z0[j][2]-STATES_z0[i][2]);
#ifdef DOIPD
double Ei=(STATES_z0[i][2])-IPD0+mu_eV;//+EF;
if(Ei<0) continue;
#endif
k_EE_z0_z0[i][j]=eval_excitation_integral(ne,Te,mu,DeltaE*eV2J,kronecker); // in m^3/s
k_EE_z0_z0_b[i][j]=eval_dexcitation_integral(ne,Te,mu,DeltaE*eV2J,kronecker)*STATES_z0[i][3]/STATES_z0[j][3];
k_EE_MAX=MAX(k_EE_z0_z0[i][j],k_EE_MAX);
k_EE_REV_MAX=MAX(k_EE_z0_z0_b[i][j],k_EE_REV_MAX);
}
}
if(fail==1)
return -1;
////////////////////////
// Elec. exc. for Z=1
///////////////////////
fail=0;
#ifdef OMP
// #pragma omp parallel for schedule(dynamic,1) collapse(2) private(kronecker,DeltaE,a,expint,G2,I_1,I_2) num_threads(num_threads)
#pragma omp for simd schedule(static) private(j,kronecker,DeltaE,a,expint,G2,I_1,I_2)
// #pragma omp for simd schedule(static) collapse(2) private(kronecker,DeltaE,a,expint,G2,I_1,I_2)
#endif
for(i=0;i<z1_len;++i)
{
for(j=0;j<z1_len;++j)
{
if(j<=i) continue;
kronecker=0.0;
if(STATES_z1[i][4]==STATES_z1[j][4])
kronecker=1.0;
#ifdef DOIPD
//double Ei=(STATES_z1[i][2]-STATES_z1[0][2])-IPD1+EF;
double Ei=STATES_z1[i][2]-IPD0+mu_eV;
if(Ei<0) continue;
#endif
DeltaE=(STATES_z1[j][2]-STATES_z1[i][2]);
k_EE_z1_z1[i][j]=eval_excitation_integral(ne,Te,mu,DeltaE*eV2J,kronecker); // in m^3/s
k_EE_z1_z1_b[i][j]=eval_dexcitation_integral(ne,Te,mu,DeltaE*eV2J,kronecker)*STATES_z1[i][3]/STATES_z1[j][3];
k_EE_MAX=MAX(k_EE_z1_z1[i][j],k_EE_MAX);
k_EE_REV_MAX=MAX(k_EE_z1_z1_b[i][j],k_EE_REV_MAX);
}
}
if(fail==1) return -1;
////////////////////////
// Elec. exc. for Z=2
///////////////////////
#if MAXLEVEL > 1
fail=0;
#ifdef OMP
// #pragma omp parallel for schedule(dynamic,1) collapse(2) private(kronecker,DeltaE,a,expint,G2,I_1,I_2) num_threads(num_threads)
#pragma omp for simd schedule(static) private(j,kronecker,DeltaE,a,expint,G2,I_1,I_2)
// #pragma omp for simd schedule(static) collapse(2) private(kronecker,DeltaE,a,expint,G2,I_1,I_2)
#endif
for(i=0;i<z2_len;++i)
{
for(j=0;j<z2_len;++j)
{
if(j<=i) continue;
kronecker=0.0;
if(STATES_z2[i][4]==STATES_z2[j][4])
kronecker=1.0;
#ifdef DOIPD
// double Ei=(STATES_z2[i][2]-STATES_z2[0][2])-IPD2+EF;
double Ei=STATES_z2[i][2]-IPD1+mu_eV;
if(Ei<0) continue;
#endif
DeltaE=(STATES_z2[j][2]-STATES_z2[i][2]);
k_EE_z2_z2[i][j]=eval_excitation_integral(ne,Te,mu,DeltaE*eV2J,kronecker);
k_EE_z2_z2_b[i][j]=eval_dexcitation_integral(ne,Te,mu,DeltaE*eV2J,kronecker)*STATES_z2[i][3]/STATES_z2[j][3];
k_EE_MAX=MAX(k_EE_z2_z2[i][j],k_EE_MAX);
k_EE_REV_MAX=MAX(k_EE_z2_z2_b[i][j],k_EE_REV_MAX);
}
}
if(fail==1) return -1;
#endif //MAXLEVEL > 1
////////////////////////
// Elec. exc. for Z=3
///////////////////////
#if MAXLEVEL > 2
fail=0;
#ifdef OMP
// #pragma omp parallel for schedule(dynamic,1) collapse(2) private(kronecker,DeltaE,a,expint,G2,I_1,I_2) num_threads(num_threads)
#pragma omp for simd schedule(static) private(j,kronecker,DeltaE,a,expint,G2,I_1,I_2)
// #pragma omp for simd schedule(static) collapse(2) private(kronecker,DeltaE,a,expint,G2,I_1,I_2)
#endif
for(i=0;i<z3_len;++i)
{
for(j=0;j<z3_len;++j)
{
if(j<=i) continue;
kronecker=0.0;
if(STATES_z3[i][4]==STATES_z3[j][4])
kronecker=1.0;
#ifdef DOIPD
// double Ei=(STATES_z3[i][2]-STATES_z3[0][2])-IPD3+EF;
double Ei=STATES_z3[i][2]-IPD2+mu_eV;
if(Ei<0) continue;
#endif
DeltaE=(STATES_z3[j][2]-STATES_z3[i][2]);
k_EE_z3_z3[i][j]=eval_excitation_integral(ne,Te,mu,DeltaE*eV2J,kronecker);
k_EE_z3_z3_b[i][j]=eval_dexcitation_integral(ne,Te,mu,DeltaE*eV2J,kronecker)*STATES_z3[i][3]/STATES_z3[j][3];
k_EE_MAX=MAX(k_EE_z3_z3[i][j],k_EE_MAX);
k_EE_REV_MAX=MAX(k_EE_z3_z3_b[i][j],k_EE_REV_MAX);
// printf("myid:%d,kfw:%.4e,krev:%.4e,i:%d,j:%d\n",myid,k_EE_z3_z3[i][j],k_EE_z3_z3_b[i][j],i,j);
}
}
if(fail==1)
return -1;
#endif // MAXLEVEL > 2
////////////////////////
// Elec. exc. for Z=4
///////////////////////
#if MAXLEVEL > 3
fail=0;
#ifdef OMP
// #pragma omp parallel for schedule(dynamic,1) collapse(2) private(kronecker,DeltaE,a,expint,G2,I_1,I_2) num_threads(num_threads)
#pragma omp for simd schedule(static) private(j,kronecker,DeltaE,a,expint,G2,I_1,I_2)
// #pragma omp for simd schedule(static) collapse(2) private(kronecker,DeltaE,a,expint,G2,I_1,I_2)
#endif
for(i=0;i<z4_len;++i)
{
for(j=0;j<z4_len;++j)
{
if(j<=i) continue;
kronecker=0.0;
if(STATES_z4[i][4]==STATES_z4[j][4])
kronecker=1.0;
#ifdef DOIPD
// double Ei=(STATES_z4[i][2]-STATES_z4[0][2])-IPD4+EF;
double Ei=STATES_z4[i][2]-IPD3+mu_eV;
if(Ei<0) continue;
#endif
DeltaE=(STATES_z4[j][2]-STATES_z4[i][2]);
k_EE_z4_z4[i][j]=eval_excitation_integral(ne,Te,mu,DeltaE*eV2J,kronecker);
k_EE_z4_z4_b[i][j]=eval_dexcitation_integral(ne,Te,mu,DeltaE*eV2J,kronecker)*STATES_z4[i][3]/STATES_z4[j][3];
k_EE_MAX=MAX(k_EE_z4_z4[i][j],k_EE_MAX);
k_EE_REV_MAX=MAX(k_EE_z4_z4_b[i][j],k_EE_REV_MAX);
}
}
if(fail==1)
return -1;
#endif // MAXLEVEL > 3
// *************************************************
// * NOW IONIZATION COEFFS
// *************************************************
/////////////////
// Ioniz. 0->1
/////////////////
fail=0;
#ifdef OMP
// #pragma omp parallel for schedule(static) collapse(2) private(kronecker,DeltaE,a,expint,G2,I_1,I_2,sigma1,sigma_MPI_2,sigma_MPI_3,tmp0,tmp1,tmp2) num_threads(num_threads)
#pragma omp for nowait schedule(static) private(j,kronecker,DeltaE,a,expint,G2,I_1,I_2,sigma1,sigma_MPI_2,sigma_MPI_3,tmp0,tmp1,tmp2)
#endif
for(i=0;i<z0_len;++i)
{
for(j=0;j<z1_len;++j)
{
DeltaE=(STATES_z1[j][2]-STATES_z0[i][2])-IPD0;//+EF;
if(DeltaE <0 )
continue;
if(kmax_estim*ne*Ith(y,i+3)>MINRATE)
k_EI_z0_z1[i][j]=MAX(0.0,double_integral_ionization(ne,Te, mu, DeltaE*eV2J));
if(kmax_estim*ne*ne*Ith(y,j+z0_len+3)>MINRATE)
k_EI_z1_z0[i][j]=STATES_z0[i][3]/STATES_z1[j][3]*double_integral_recombination(ne,Te, mu, DeltaE*eV2J);
k_EI_MAX=MAX(k_EI_MAX,k_EI_z0_z1[i][j]);
k_EI_REV_MAX=MAX(k_EI_REV_MAX,k_EI_z1_z0[i][j]);
k_EI_z0_z1[i][j]*=fermi_factor;
k_EI_z1_z0[i][j]*=fermi_factor;
#ifdef MULTIPHOTON
// *******************
// * MPI 2 PHOTONS *
// *******************
double dE_SI=DeltaE*eV2J;
if(twophoton_energy >= DeltaE-IPD0 && DeltaE-IPD0 > 0.0 )
{
sigma1=sigma_tmp*pow(dE_SI,2.5)/sqrt(dE_SI);
sigma_MPI_2=sigma1*sigma1/nu_div_hnu_sq;
k_MPI_z0_z1[i][j][0]=sigma_MPI_2*I_sq;
}
// *******************
// * MPI 3 PHOTONS *
// *******************
if(threephoton_energy >= DeltaE-IPD0 && DeltaE-IPD0 >0.0 )
{
sigma1=sigma_tmp*pow(dE_SI,2.5)/sqrt(dE_SI);
sigma_MPI_3=sigma1*sigma1*sigma1/2.0/nu_div_nu_div_hnu_cub;
k_MPI_z0_z1[i][j][1]=sigma_MPI_3*I_cu;//*prob*beta_pi(Te,mu,DeltaE);
}
#endif
// ***************
// * RAD RECOMB *
// ***************
// if(DeltaE>0)
// {
// if(expint > 0 )
// {
// //k_MPI_z1_z0[i][j][0]=v_e*k_RR_fact1*1.0*k_RR_fact2*pow((DeltaE)*J2eV/STATES_z1[j][2],1.5)*expint*EXPR(a);
// k_MPI_z1_z0[i][j][0]=v_e*k_RR_fact1*1.0*k_RR_fact2*pow((DeltaE)/STATES_z1[j][2],1.5)*expint*EXPR(a);
// }
// else
// {
// k_MPI_z1_z0[i][j][0]=0.0; //EXPR(a) can become +Inf --> nonsensical rate coeff.; if one of the factors is 0 --> the rest is irrelevant
// }
// }
}
}
if(fail==1) return -1;
/////////////////
// Ioniz. 1->2
/////////////////
#if MAXLEVEL > 1
fail=0;
#ifdef OMP
// #pragma omp parallel for schedule(static) collapse(2) private(kronecker,DeltaE,a,expint,G2,I_1,I_2,sigma1,sigma_MPI_2,sigma_MPI_3,tmp0,tmp1,tmp2) num_threads(num_threads)
#pragma omp for nowait schedule(static) private(j,kronecker,DeltaE,a,expint,G2,I_1,I_2,sigma1,sigma_MPI_2,sigma_MPI_3,tmp0,tmp1,tmp2)
#endif
for(i=0;i<z1_len;++i)
{
for(j=0;j<z2_len;++j)
{
DeltaE=(STATES_z2[j][2]-STATES_z1[i][2])-IPD1; //+EF;
if(DeltaE <0 )
continue;
if(kmax_estim*ne*Ith(y,i+z0_len+3)>MINRATE)
k_EI_z1_z2[i][j]=MAX(0.0,double_integral_ionization(ne,Te, mu, DeltaE*eV2J));
if(kmax_estim*ne*ne*Ith(y,j+z0_len+z1_len+3)>MINRATE)
k_EI_z2_z1[i][j]=STATES_z1[i][3]/STATES_z2[j][3]*double_integral_recombination(ne,Te, mu, DeltaE*eV2J);
k_EI_MAX=MAX(k_EI_MAX,k_EI_z1_z2[i][j]);
k_EI_REV_MAX=MAX(k_EI_REV_MAX,k_EI_z2_z1[i][j]);
k_EI_z1_z2[i][j]*=fermi_factor;
k_EI_z2_z1[i][j]*=fermi_factor;
#ifdef MULTIPHOTON
double dE_SI=DeltaE*eV2J;
// *******************
// * MPI 2 PHOTONS *
// *******************
if(twophoton_energy >= DeltaE - IPD1)
{
sigma1=sigma_tmp*pow(dE_SI,2.5)/sqrt(dE_SI);
sigma_MPI_2=sigma1*sigma1/nu_div_hnu_sq;
k_MPI_z1_z2[i][j][0]=sigma_MPI_2*I_sq;
}
// *******************
// * MPI 3 PHOTONS *
// *******************
if(threephoton_energy > DeltaE - IPD1)
{
sigma1=sigma_tmp*pow(dE_SI,2.5)/sqrt(dE_SI);
sigma_MPI_3=sigma1*sigma1*sigma1/2.0/nu_div_nu_div_hnu_cub;
k_MPI_z1_z2[i][j][1]=sigma_MPI_3*I_cu;//*prob*beta_pi(Te,mu,DeltaE);
}
#endif
// **************
// * RAD RECOMB *
// **************
// if(DeltaE>0)
// {
// if(expint > 0 )
// {
// k_MPI_z2_z1[i][j][0]=v_e*k_RR_fact1*4.0*k_RR_fact2*pow((DeltaE)/STATES_z2[j][2],1.5)*expint*EXPR(a);
// }
// else
// {
// k_MPI_z2_z1[i][j][0]=0.0; //EXPR(a) can become +Inf --> nonsensical rate coeff.; if one of the factors is 0 --> the rest is irrelevant
// }
// }
}
}
if(fail==1) return -1;
#endif //MAXLEVEL > 1
/////////////////
// Ioniz. 2->3
/////////////////
#if MAXLEVEL > 2
fail=0;
#ifdef OMP
// #pragma omp parallel for schedule(static) collapse(2) private(kronecker,DeltaE,a,expint,G2,I_1,I_2,sigma1,sigma_MPI_2,sigma_MPI_3,tmp0,tmp1,tmp2) num_threads(num_threads)
#pragma omp for nowait schedule(static) private(j,kronecker,DeltaE,a,expint,G2,I_1,I_2,sigma1,sigma_MPI_2,sigma_MPI_3,tmp0,tmp1,tmp2)
#endif
for(i=0;i<z2_len;++i)
{
for(j=0;j<z3_len;++j)
{
DeltaE=(STATES_z3[j][2]-STATES_z2[i][2])-IPD2;//+EF;
if(DeltaE <0 )
continue;
if(kmax_estim*ne*Ith(y,i+z0_len+z1_len+3)>MINRATE)
k_EI_z2_z3[i][j]=MAX(0.0,double_integral_ionization(ne,Te, mu, DeltaE*eV2J));
if(kmax_estim*ne*ne*Ith(y,j+z0_len+z1_len+z2_len+3)>MINRATE)
k_EI_z3_z2[i][j]=STATES_z2[i][3]/STATES_z3[j][3]*double_integral_recombination(ne,Te, mu, DeltaE*eV2J);
k_EI_MAX=MAX(k_EI_MAX,k_EI_z2_z3[i][j]);
k_EI_REV_MAX=MAX(k_EI_REV_MAX,k_EI_z3_z2[i][j]);
k_EI_z2_z3[i][j]*=fermi_factor;
k_EI_z3_z2[i][j]*=fermi_factor;
#ifdef MULTIPHOTON
double dE_SI=DeltaE*eV2J;
// *******************
// * MPI 2 PHOTONS *
// *******************
if(twophoton_energy > DeltaE - IPD2)
{
sigma1=sigma_tmp*pow(dE_SI,2.5)/sqrt(dE_SI);
sigma_MPI_2=sigma1*sigma1/nu_div_hnu_sq;
k_MPI_z2_z3[i][j][0]=sigma_MPI_2*I_sq;
}
// *******************
// * MPI 3 PHOTONS *
// *******************
if(threephoton_energy > DeltaE -IPD2)
{
sigma1=sigma_tmp*pow(dE_SI,2.5)/sqrt(dE_SI);
sigma_MPI_3=sigma1*sigma1*sigma1/2.0/nu_div_nu_div_hnu_cub;
k_MPI_z2_z3[i][j][1]=sigma_MPI_3*I_cu;//*prob*beta_pi(Te,mu,DeltaE);
}
#endif
}
}
if(fail==1) return -1;
#endif // MAXLEVEL > 2
/////////////////
// Ioniz. 3->4
/////////////////
#if MAXLEVEL > 3
fail=0;
#ifdef OMP
// #pragma omp parallel for schedule(static) collapse(2) private(kronecker,DeltaE,a,expint,G2,I_1,I_2,sigma1,sigma_MPI_2,sigma_MPI_3,tmp0,tmp1,tmp2) num_threads(num_threads)
#pragma omp for nowait schedule(static) private(j,kronecker,DeltaE,a,expint,G2,I_1,I_2,sigma1,sigma_MPI_2,sigma_MPI_3,tmp0,tmp1,tmp2)
#endif
for(i=0;i<z3_len;++i)
{
for(j=0;j<z4_len;++j)
{
DeltaE=(STATES_z4[j][2]-STATES_z3[i][2])-IPD3;//+EF;
if(DeltaE <0 )
continue;
if(kmax_estim*ne*Ith(y,i+z0_len+z1_len+z2_len+3)>MINRATE)
k_EI_z3_z4[i][j]=MAX(0.0,double_integral_ionization(ne,Te, mu, DeltaE*eV2J));
if(kmax_estim*ne*ne*Ith(y,j+z0_len+z1_len+z2_len+z3_len+3)>MINRATE)
k_EI_z4_z3[i][j]=STATES_z3[i][3]/STATES_z4[j][3]*double_integral_recombination(ne,Te, mu, DeltaE*eV2J);
k_EI_MAX=MAX(k_EI_MAX,k_EI_z3_z4[i][j]);
k_EI_REV_MAX=MAX(k_EI_REV_MAX,k_EI_z4_z3[i][j]);
k_EI_z3_z4[i][j]*=fermi_factor;
k_EI_z4_z3[i][j]*=fermi_factor;
#ifdef MULTIPHOTON
double dE_SI=DeltaE*eV2J;
// *******************
// * MPI 2 PHOTONS *
// *******************
if(twophoton_energy> DeltaE - IPD3)
{
sigma1=sigma_tmp*pow(dE_SI,2.5)/sqrt(dE_SI);
sigma_MPI_2=sigma1*sigma1/nu_div_hnu_sq;
k_MPI_z3_z4[i][j][0]=sigma_MPI_2*I_sq;
}
// *******************
// * MPI 3 PHOTONS *
// *******************
if(threephoton_energy > DeltaE - IPD3)
{
sigma1=sigma_tmp*pow(dE_SI,2.5)/sqrt(dE_SI);
sigma_MPI_3=sigma1*sigma1*sigma1/2.0/nu_div_nu_div_hnu_cub;
k_MPI_z3_z4[i][j][1]=sigma_MPI_3*I_cu;//*prob*beta_pi(Te,mu,DeltaE);
}
#endif
// **************
// * RAD RECOMB *
// **************
// if(DeltaE>0)
// {
// if(expint>0)
// {
// k_MPI_z4_z3[i][j][0]=v_e*k_RR_fact1*4.0*k_RR_fact2*pow((DeltaE)/STATES_z4[j][2],1.5)*expint*EXPR(a);
// }
// else
// {
// k_MPI_z4_z3[i][j][0]=0.0;
// }
// }
}
}
if(fail==1) return -1;
#endif // MAXLEVEL > 3
// if(k_EI_MAX >0 || k_EI_REV_MAX > 0)
// printf("myid:%d, k_EI_MAX:%.4e, k_EI_REV_MAX:%.4e,fac:%.4e\n",myid,k_EI_MAX,k_EI_REV_MAX,fermi_factor);
return 0;
}
// ***************************************************************************************
double ExpInt(double x)
{
if(x>350)
{
return 0; //becomes extremely small -> underflow
}
return gsl_sf_expint_E1(x);
// NOTE: the fallback below is unreachable; kept for reference only.
//Approximation by Swamee and Ojha
double A=log((0.56146/x+0.65)*(1+x));
//Numeric limit
double B;
B=x*x*x*x*exp(7.7*x)*pow(2.0+x,3.7);
return pow(pow(A,-7.7)+B,-0.13);
}
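// A minimal self-check sketch for ExpInt (hypothetical helper, not part of the
// original code; assumes <stdio.h> and GSL are linked as elsewhere in this file):
// E1(1.0) ~ 0.2193839, and the x>350 cutoff returns exactly 0 to avoid underflow.
static void ExpInt_selfcheck(void)
{
	printf("ExpInt(1.0) = %.7f (expect ~0.2193839)\n", ExpInt(1.0));
	printf("ExpInt(400.0) = %.1f (expect 0.0, underflow guard)\n", ExpInt(400.0));
}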
#ifdef USEFLOAT
float fak(float t, float x, float j,float s) //aux. function for genexpint
{
return powf(t,x-1)*powf( powf(logf(-logf(t)) , j) / -logf(t),s);
}
#else
double fak(double t, double x, double j,double s) //aux. function for genexpint
{
return pow(t,x-1)*pow( pow(log(-log(t)) , j) / -log(t),s);
}
#endif
#ifdef USEFLOAT
float genexpint(float x,float ss,float j)
{
int maks=5;
float eps=1E-6;
float b=0.36787944117f; //exp(-1)
float s_old=0;
float i=0.0;
float s=0.0;
float t=0.0;
float d=0.0;
float dd=0.0;
float sum=0.0;
float m=0.0;
int n,k;
for(n=1;n<maks+1;++n)
{
if(n==1)
{
i=1.0;
float logt=logf(0.5*b);
float simplefak=-powf(0.5*b,x-1.0)*logf(-logt)/logt;
s=b*simplefak;
}
else
{
m=i;
d=b/(3*m);
dd=2*d;
t=0.5*d;
sum=0.0;
for(k=1;k<m+1;++k)
{
float logt=logf(t);
float simplefak=-powf(t,x-1.0)*logf(-logt)/logt;
sum+=simplefak;
t=t+dd;
logt=logf(t);
simplefak=-powf(t,x-1.0)*logf(-logt)/logt;
sum+=simplefak;
t=t+d;
}
i=i*3.0;
s=(s+b*sum/m)/3.0;
}
s=s/tgammaf(j+1.0);
if(fabs(s_old-s)<=eps)
break;
else
s_old=s;
}
return s; ///tgamma(j+1.0);
//return s/tgamma1pm1(j+1.0); //,policy<digits10<3> >());
}
#else
double genexpint(double x,double ss,double j)
// G_k(x) = E_1^(k-1)(x), where
// E_ss^j = 1/Gamma(j+1) * int_1^inf (ln(t))^j * t^(-ss) * e^(-x*t) dt
{
//Nuri Ozlap, Elgiz Bairamov, "Uniform convergence and computation of the generalized exponential integrals",
//J. Math. Chem. 49:520-530 (2011)
//See also: "The efficient computation of some generalised exponential integrals"
//
// double j=1.0; //j=k-1 (k=2)
// double ss=1.0;
// for x=60 ---> 4.7243e-45
// if(x>=60)
// return 5e-45;
int maks=5;
double eps=1E-12;
eps=1E-6;
double b=exp(-1.);
double s_old=0;
double i=0.0;
double s=0.0;
double t=0.0;
double d=0.0;
double dd=0.0;
double sum=0.0;
double m=0.0;
int n,k;
for(n=1;n<maks+1;++n)
{
if(n==1)
{
i=1.0;
//s=b*fak(0.5*b,x,j,ss);
double logt=log(0.5*b);
double simplefak=-pow(0.5*b,x-1.0)*log(-logt)/logt;
s=b*simplefak;
}
else
{
m=i;
d=b/(3*m);
dd=2*d;
t=0.5*d;
sum=0.0;
for(k=1;k<m+1;++k)
{
double logt=log(t);
double simplefak=-pow(t,x-1.0)*log(-logt)/logt;
//sum=sum+fak(t,x,j,ss);
sum+=simplefak;
t=t+dd;
logt=log(t);
simplefak=-pow(t,x-1.0)*log(-logt)/logt;
//sum=sum+fak(t,x,j,ss);
sum+=simplefak;
t=t+d;
}
i=i*3.0;
s=(s+b*sum/m)/3.0;
}
s=s/tgamma(j+1.0);
if(fabs(s_old-s)<=eps)
break;
else
s_old=s;
}
return s; ///tgamma(j+1.0);
//return s/tgamma1pm1(j+1.0); //,policy<digits10<3> >());
}
#endif
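// Hedged usage sketch (hypothetical helper): the simplified inner integrand above
// specializes fak() to j=1, ss=1, so genexpint effectively evaluates E_1^1(x)=G_2(x)
// regardless of the ss argument; j still enters through the tgamma(j+1.0) division.
static void genexpint_example(void)
{
	double g = genexpint(2.0, 1.0, 1.0); // G_2(2) under the scheme above
	printf("genexpint(2,1,1) = %.6e\n", g);
}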
// ************************************************
// * RE-ABSORPTION STUFF
// ************************************************
double StarkWidth(double nu,double nl,double Te,double Ti,double Tr,double Ne,double Ni)
{
//NOTE: (nu-nl) must be greater than 0 !!! otherwise w_qs=0 and => escape_fac=0
//Te-input, Ti-input, Tr-input in eV !
//Zi=degree of ionization
double Zcore=13.0;
double Zr=Zcore-1.0;
Ni=Ni/(1/pow(bohr_radius,3.0)); // m^-3 to bohr_radius^-3
Ne=Ne/(1/pow(bohr_radius,3.0));
double Zi=Ne/Ni;
Te=Te*eV2H;
Ti=Ti*eV2H;
Tr=Tr*eV2H; //radiators temp.
double mi=26.981*AMU/EMASS; //mass in units of emass
double me=1.0;
double mr=1.0*AMU/EMASS;
double ve=sqrt(Te/me);
double vi=sqrt(Ti/mi);
double vr=sqrt(Tr/mi);
double re=pow(3.0/4.0/pi/Ne,1.0/3.0); //mean distance
double ri=pow(3.0/4.0/pi/Ni,1.0/3.0);
double qi=Zi;
double qe=1.0;
double qr=-Zr; //very important
double F=2.0*pi*pow(4.0/15.0,2.0/3.0)*qe*pow(Ne,2.0/3.0); //Holtsmark field
double debye=sqrt(Te/4.0/pi/Ne/qe/qe);
double eta_pert=(1.0+re/debye)*exp(-re/debye);
double eta_rad=exp(-qr*qe/re/Te);
double omega_mean=1.4385;
double kronecker=0;
if(nu-nl==1.0)
kronecker=0.5;
else
kronecker=1.0;
double w_qs=omega_mean*3.0*(nu*nu-nl*nl)/Zcore*F*eta_pert*eta_rad*kronecker;
double f_F=sqrt(ve*ve+vr*vr)/re; //microfield freq.
double R_s=w_qs/f_F/eta_rad; // static/dynamic stark ratio
double f_s=R_s/(R_s+0.5); //quasi-staticity factor
double w_ss=f_s*w_qs; //Full stark width in hartree
double w_J=w_ss*4.359745e-18; //hartree to Joule
if(w_ss==0) //escape factor will be NaN
{
printf("WARNING:w_ss =0\n");
printf("R_s:%f,w_qs:%f,kron:%f,f_F:%f\n",R_s,w_qs,kronecker,f_F);
}
//bandwidth conversion to meters
//double delta_lambda=w_J*lambda0*lambda0/planck/LIGHTSPEED; //in m
return w_J/planck; /// in Hz
//return delta_lambda;
}
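// Hedged usage note, mirroring the call sites in colrad_ydot above: temperatures
// are expected in eV (hence the Te/11605 conversions from Kelvin) and densities
// in m^-3; the returned FWHM in Hz is converted to a wavelength width via
//   wstark_len = wstark_freq*lambda0*lambda0/LIGHTSPEED;   // in m
// and handed to EscapeFactor() in Angstrom (factor 1e10).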
//actually the transmission factor (c.f.
double EscapeFactor(double w,double tau0)
{
//w in Angstrom
/*
double a=5.441*exp(-18.55*tau0)+0.668*exp(-0.0515*tau0);
double b=1.259e-5*pow(tau0,4.0)-2.694e-4*pow(tau0,3.0)+1.877e-3*pow(tau0,2.0)-1.625e-3*tau0+6.887e-3;
double c=5.773*exp(-18.63*tau0)+0.7008*exp(-0.05994*tau0);
double d=(2.245*tau0*tau0-0.08527*tau0+0.1311)/(tau0*tau0+8.936*tau0+2.649);
double T=(a*w*w+b*w)/(c*w*w+d);
*/
double lambda_lo=-10.0*w;
double lambda_hi=+10.0*w;
double T=trapz(lambda_lo,lambda_hi,100,w,tau0);
return T;
}
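// Descriptive note (not in the original): the transmission returned above is
//   T = int L(lambda)*exp(-tau0*phi0*L(lambda)) dlambda
// evaluated over +-10 FWHM by trapz() below, with phi0=L(0); the commented
// closed-form fit is an alternative parametrization of the same quantity.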
double Lorentzian(double w,double lambda) //both params in angs!
{
//Area-normalized
//https://magicplot.com/wiki/fit_equations
//return 1.0/pi*0.5*w/(pow(lambda,2.0)+pow(0.5*w,2.0));
return 1.0/(w/2.0)/pi*1.0/(1.0+pow(lambda/(w/2.0),2.0));
}
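// Sanity-check sketch (hypothetical helper): the area-normalized Lorentzian should
// integrate to ~1 over a window much wider than its FWHM w; over +-10*w the simple
// trapezoid below gives ~(2/pi)*atan(20) ~ 0.97, approaching 1 as the window grows.
static double Lorentzian_area(double w, double lo, double hi, int n)
{
	double h = (hi - lo) / n;
	double s = 0.5 * (Lorentzian(w, lo) + Lorentzian(w, hi));
	int x;
	for (x = 1; x < n; ++x)
		s += Lorentzian(w, lo + x * h);
	return s * h;
}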
double integrand(double w,double lambda,double tau,double phi0)
{
double exponent=tau*phi0*Lorentzian(w,lambda);
//printf("tau:%.4e,phi0:%.4e,Lor:%.4e,e:%.4e\n",
// tau,phi0,Lorentzian(w,lambda),exponent);
return exp(-exponent)*Lorentzian(w,lambda);
}
double trapz(double a,double b,int n,double w,double tau)
{
double phi0=Lorentzian(w,0.0);
double area=0.0;
double sum=0.0;
double h=(b-a)/n;
int x;
for(x=1;x<n;++x)
{
sum+=integrand(w,((double) x)*h+a,tau,phi0);
}
area=h/2.0*(integrand(w,a,tau,phi0)+integrand(w,b,tau,phi0)+2*sum);
return area;
}
// Springer Handbook of Atomic, Molecular and Optical Physics, p. 838 (Part D)
// c.f. https://www.nist.gov/pml/atomic-spectroscopy-compendium-basic-ideas-notation-data-and-formulas/atomic-spectroscopy
// NOTE: only valid if DeltaN > 0, i.e. transitions within the same shell are
// not possible. Reason: approximation of the Gaunt factor.
double EinsteinCoeff(double n1,double n2,double g2,double DeltaE)
{
double Delta_Lambda=planck*LIGHTSPEED/DeltaE;
double DeltaN=n2-n1;
double eps1=1.0/n1/n1;
double eps2=1.0/n2/n2;
double Gaunt=1.0-0.25/fabs(DeltaN);
double S; //line strength
double A21;
double z=13.0;
double nominator=32.0/pi/sqrt(3)*pow(ECHARGE*bohr_radius/z,2.0)*pow(eps1*eps2,1.5)*Gaunt;
double denom=pow(eps1-eps2,4.0);
//S=32.0/pi/sqrt(3)*pow(ECHARGE*bohr_radius/z,2.0)*pow(eps1*eps2,1.5)/pow(eps1-eps2,4.0)*Gaunt;
S=nominator/denom;
A21=16.0*pow(pi,3.0)/3.0/planck/ECONST/pow(Delta_Lambda,3.0)/g2*S;
/*
{
//printf("Error:A21 is NaN\n");
printf("A21:%.4e,Gaunt:%f,deltaN:%f,S:%.4e,eps1:%f,eps2:%f,Nominator:%.4e,Denom:%.4e,g2:%f,Delta_Lambda:%.4e\n",
A21,Gaunt,DeltaN,S,eps1,eps2,nominator,denom,g2,Delta_Lambda);
}
*/
return A21;
}
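// Hedged usage sketch (hypothetical helper): DeltaE must be in Joule, as at the
// call sites above; n1,n2 are principal quantum numbers and g2 is the upper-state
// statistical weight (g2=8 below assumes 2n^2 degeneracy for n=2, an illustration
// only). With the hard-coded core charge z=13 this is an Al-like estimate, not a
// literal hydrogen value.
static void EinsteinCoeff_example(void)
{
	double dE = 10.2*eV2J; // illustrative 1->2 energy gap
	double A21 = EinsteinCoeff(1.0, 2.0, 8.0, dE);
	printf("A21 ~ %.3e 1/s\n", A21);
}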
// ********************************************************************
// * WRITE COLRAD CONCENTRATIONS TO FILE FOR RESTART
// ********************************************************************
int colrad_write(int number)
{
FILE *outfile;
char fname[255];
sprintf(fname, "%s.%d.%d.colrad", outfilename, myid,number);
outfile = fopen(fname, "w");
if (NULL == outfile)
{
char errstr[255];
sprintf(errstr,"ERROR: cannot open colrad outfile %s\n",fname);
error(errstr);
}
int i,j,k,l;
for(i=1; i < local_fd_dim.x-1; i++)
{
for(j=1; j < local_fd_dim.y-1; j++)
{
for(k=1; k < local_fd_dim.z-1; k++)
{
fprintf(outfile,"%d %d %d",i,j,k);
for(l=0;l<neq;l++)
{
fprintf(outfile, " %.4e ", Ith(l1[i][j][k].y,l));
}
fprintf(outfile,"\n");
}
}
}
fclose(outfile);
return 0; //all OK
}
// ********************************************************************
// * READ COLRAD CONCENTRATIONS FOR RESTART
// ********************************************************************
int colrad_read(int number)
{
FILE *infile;
char fname[255];
int i,j,k,l;
sprintf(fname, "%s.%d.%d.colrad", outfilename, myid,number);
infile=fopen(fname,"r");
if(infile==NULL)
{
char errstr[255];
sprintf(errstr,"ERROR: Colrad infile %s not found\n", fname);
error(errstr);
}
double tmp;
char **tokens;
size_t numtokens;
char line[MAX_LINE_LENGTH];
int linenr=1;
for(i=1; i < local_fd_dim.x-1; i++)
{
for(j=1; j < local_fd_dim.y-1; j++)
{
for(k=1; k < local_fd_dim.z-1; k++)
{
//read data
if (fgets (line, MAX_LINE_LENGTH, infile) == NULL) {
char errstr[255];
sprintf(errstr,"Error Reading colrad-input-file: %s in line %d.\n", fname,linenr);
error(errstr);
}
tokens = strsplit(line, ", \t\n", &numtokens); //strsplit in imd_ttm.char
for(l=0;l<neq;l++)
{
sscanf(tokens[l+3], "%lf", &tmp);
Ith(l1[i][j][k].y,l)=tmp;
}
linenr++;
for (l = 0; l < numtokens; l++) {
free(tokens[l]);
}
if (tokens != NULL)
free(tokens);
}
}
}
fclose(infile);
return 0;
}
// double integrand_cross_section (double x, void * p)
// {
// //TODO: consolidate constants
// //use float where possible
// struct my_f_params * params = (struct my_f_params *)p;
// double eng=x;
// double ne=params->ne;
// double T=params->T;
// double mu=params->mu;
// double DeltaE = params->DeltaE;
// double (*cross_func)(double,double) = params->fun_ptr;
// //n_FERMI=@(eng,ne,T) (2*emass)^1.5/2/ne/hbar^3/pi^2 *sqrt(eng).*fermi(eng,ne,T); %identical
// double fermi_fun=1.0/(1.0+exp((eng-mu)/BOLTZMAN/T));
// double vel=sqrt(2.0*eng/EMASS);
// double sigma=cross_func(eng,DeltaE);
// double f=pow(2.0*EMASS,1.5)/2.0/ne/pow(HBAR,3.0)/M_PI/M_PI*sqrt(eng)*fermi_fun*vel*sigma;
// return f;
// }
// double cross_section_ionization(double E,double DeltaE)
// {
// double alpha=0.05;
// double beta=4.0;
// double y=E/DeltaE;
// double sigma=4.0*M_PI*pow(bohr_radius,2.0)* pow(E_ion_H*eV2J/DeltaE ,2.0) * alpha * (y-1.0)/y/y*log(5/4*beta*y);
// return sigma;
// }
double inner_integrand_ionization(double x, void *p) // x = E_prime
{
struct my_f_params * params = (struct my_f_params *)p;
double E_prime=x;
double ne=params->ne;
double T=params->T;
double mu=params->mu;
double DeltaE = params->DeltaE;
double E=params->E; //needed later for E''=E-E'-DeltaE
double E_prime_prime=E-E_prime-DeltaE;
double Pauli_E_prime=1.0-1.0/(1.0+exp((E_prime-mu)/BOLTZMAN/T));
// if(Pauli_E_prime < 1e-100) return 0;
double Pauli_E_prime_prime=1.0-1.0/(1.0+exp((E_prime_prime-mu)/BOLTZMAN/T));
// if(Pauli_E_prime_prime < 1e-100) return 0;
double alpha=0.05;
double beta=4.0;
//if(E_prime==DeltaE) return 0;
// double c1=alpha*4.0*M_PI* gsl_pow_2(bohr_radius)* gsl_pow_2(E_ion_H*eV2J/DeltaE)*DeltaE;
// double c2=5.0/4.0*beta/DeltaE;
// double sigma_deriv=c1*((2*DeltaE-E_prime)*log(c2*E_prime)-DeltaE+E_prime)/gsl_pow_3(E_prime);
// double sigma_deriv=4.0*M_PI*pow(bohr_radius,2.0)*pow(E_ion_H*eV2J/DeltaE,2.0)*alpha*DeltaE
// *((2*DeltaE-E_prime)*log(5.0*beta/4.0/DeltaE/E_prime)-DeltaE+E_prime)/pow(E_prime,3.0);
// double sigma_deriv=4.0*M_PI*bohr_radius_sq*alpha*E_ion_H_sq_J/DeltaE/gsl_pow_3(E_prime)
// *((2.0*DeltaE-E_prime)*log(5.0/4.0*beta*E_prime/DeltaE) - DeltaE + E_prime);
double sigma_deriv=4.0*pi*bohr_radius_sq*alpha*E_ion_H_sq_J *
(DeltaE*log(5/4*beta*(DeltaE+E_prime+E_prime_prime)/DeltaE)+E_prime+E_prime_prime) /
DeltaE / gsl_pow_2(DeltaE+E_prime + E_prime_prime);
double f=sigma_deriv*Pauli_E_prime*Pauli_E_prime_prime; //F(E) im outer integrand
//printf("finner:%.4e %.4e\n",f,E_prime);
return f;
}
double inner_integrand_recombination(double x, void *p) // x = incoming electron energy. NOTE: for x=0 --> NaN
{
struct my_f_params * params = (struct my_f_params *)p;
double E_prime=x;
double ne=params->ne;
double T=params->T;
double mu=params->mu;
double DeltaE = params->DeltaE;
double E_prime_prime=params->E;
// Note: E'' and E' are the energies of the incoming electrons,
// while E=DeltaE+E'+E'' is the energy of the secondary electron!
double E=DeltaE+E_prime+E_prime_prime;
double vel=2.0*sqrt(E_prime * E_prime_prime)/EMASS;
if(vel<1e-200) return 0;
double Pauli_E=1.0-1.0/(1.0+exp((E-mu)/BOLTZMAN/T));
// if(Pauli_E < 1e-100) return 0;
double fermi_fun=1.0/(1.0+exp((E_prime-mu)/BOLTZMAN/T));
// if(fermi_fun < 1e-100) return 0;
double F_E_prime=double_emass_pow_3_2/2.0/ne/hbar_cub/pi_sq*sqrt(E_prime)*fermi_fun;
double KR_factor= E/E_prime/E_prime_prime; //Klein-Rosseland relation
//the rest of the factor can be pulled out of the integrals (hbar^3/2/me^2)
//F_E_prime_prime is accounted for by the outer integral
double alpha=0.05;
double beta=4.0;
double sigma_deriv=4.0*pi*bohr_radius_sq*alpha*E_ion_H_sq_J *
(DeltaE*log(5/4*beta*(DeltaE+E_prime+E_prime_prime)/DeltaE)+E_prime+E_prime_prime) /
DeltaE / gsl_pow_2(DeltaE+E_prime + E_prime_prime);
// printf("x:%.4e, fermi_fun:%.4e,")
double f=sigma_deriv*Pauli_E*F_E_prime*vel*KR_factor;
// if(isnan(f)!=0) return 0.0;
// if(myid==3)
// printf("finner:%.4e %.4e %.4e %.4e %.4e %.4e\n",E_prime,f,sigma_deriv,F_E_prime,Pauli_E,KR_factor);
return f;
}
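/* Descriptive note on the detailed-balance weighting above (not in the original):
 * the factor KR_factor = E/(E'*E'') with E = DeltaE + E' + E'' ties the
 * recombination integrand to the ionization cross-section derivative; the
 * remaining constant hbar^3/(2*EMASS^2) is pulled out of both integrals as
 * KR_factor2 in double_integral_recombination() below. */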
double outer_integrand_recombination(double x,void *p)
{
struct my_f_params * params = (struct my_f_params *)p;
double E_prime_prime=x; //the other incoming electron (the inner integral handles E_prime)
//the secondary electron has energy E
double DeltaE = params->DeltaE;
double ne=params->ne;
double T=params->T;
double mu=params->mu;
double fermi_fun=1.0/(1.0+exp((E_prime_prime-mu)/BOLTZMAN/T));
// if(fermi_fun<1e-100) return 0;
double F=double_emass_pow_3_2/2.0/ne/hbar_cub/pi_sq*sqrt(E_prime_prime)*fermi_fun; //the velocity factor is applied in the inner integrand
// if(F < 1e-100) return 0;
// if(F < 1e-100)
// return 0.0;
fparams_inner.T=T;
fparams_inner.ne=ne;
fparams_inner.mu=mu;
fparams_inner.DeltaE=DeltaE;
fparams_inner.E=E_prime_prime;
gsl_function gslfun_inner;
gslfun_inner.function=&inner_integrand_recombination;
gslfun_inner.params=&fparams_inner;
double integ_inner;
double integ_err;
// gsl_integration_qags (&gslfun_inner, 0.0, muINF, integ_abstol, integ_reltol, integ_meshdim,
// winteg_inner, &integ_inner, &integ_err);
// gsl_integration_qagiu (&gslfun_inner, 0.0, integ_abstol, integ_reltol, integ_meshdim,
// winteg_inner, &integ_inner, &integ_err);
gsl_integration_qag(&gslfun_inner, 0, muINF, integ_abstol, integ_reltol, integ_meshdim,1,
winteg_inner, &integ_inner, &integ_err);
return F*integ_inner;
}
double double_integral_recombination(double ne,double T, double mu, double DeltaE)
{
// return 0;
gsl_function gslfun_outer;
gslfun_outer.function = &outer_integrand_recombination;
fparams_outer.T=T;
fparams_outer.ne=ne;
fparams_outer.mu=mu;
fparams_outer.DeltaE=DeltaE;
gslfun_outer.params = &fparams_outer;
double integ_outer=0;
double integ_err=0;
double KR_factor2=hbar_cub/2.0/ EMASS / EMASS; //pulled out of the inner integrand
// gsl_integration_qagiu(&gslfun_outer, 0.0, integ_abstol, integ_reltol, integ_meshdim,
// winteg_outer, &integ_outer, &integ_err);
gsl_integration_qag(&gslfun_outer, 0, muINF, integ_abstol, integ_reltol, integ_meshdim,1,
winteg_outer, &integ_outer, &integ_err);
//gsl_integration_qags (&gslfun_outer, 0.0, muINF, integ_abstol, integ_reltol, integ_meshdim,
// winteg_outer, &integ_outer, &integ_err);
//DO NOT FORGET: the ratio of the statistical weights (applied by the callers)
// if(integ_outer < MINRATE) integ_outer=0.0;
return MAX(integ_outer*KR_factor2,0.0);
}
double outer_integrand_ionization(double x,void *p)
{
struct my_f_params * params = (struct my_f_params *)p;
double eng=x;
double DeltaE = params->DeltaE;
double ne=params->ne;
double T=params->T;
double mu=params->mu;
if(x==0) return 0.0; //because vel would be 0
double vel=sqrt(2.0*eng/EMASS);
if(vel < 1e-200) return 0;
double fermi_fun=1.0/(1.0+exp((eng-mu)/BOLTZMAN/T));
// if(fermi_fun < 1e-100) return 0;
double F=double_emass_pow_3_2/2.0/ne/hbar_cub/pi_sq*sqrt(eng)*fermi_fun*vel; //DOS * f_FD * vel / ne
fparams_inner.T=T;
fparams_inner.ne=ne;
fparams_inner.mu=mu;
fparams_inner.DeltaE=DeltaE;
fparams_inner.E=eng;
gsl_function gslfun_inner;
gslfun_inner.function=&inner_integrand_ionization;
gslfun_inner.params=&fparams_inner;
double integ_inner;
double integ_err;
// gsl_integration_qags (&gslfun_inner, DeltaE, muINF, integ_abstol, integ_reltol, integ_meshdim,
// winteg_inner, &integ_inner, &integ_err);
// gsl_integration_qagiu (&gslfun_inner, DeltaE,integ_abstol, integ_reltol, integ_meshdim,
// winteg_inner, &integ_inner, &integ_err);
gsl_integration_qag(&gslfun_inner, DeltaE, muINF, integ_abstol, integ_reltol, integ_meshdim,1,
winteg_inner, &integ_inner, &integ_err);
return F*integ_inner;
}
double double_integral_ionization(double ne,double T, double mu, double DeltaE)
{
// return 0;
gsl_function gslfun_outer;
gslfun_outer.function = &outer_integrand_ionization;
fparams_outer.T=T;
fparams_outer.ne=ne;
fparams_outer.mu=mu;
fparams_outer.DeltaE=DeltaE;
gslfun_outer.params = &fparams_outer;
double integ_outer=0;
double integ_err=0;
// gsl_integration_qags (&gslfun_outer, 0, muINF, integ_abstol, integ_reltol, integ_meshdim,
// winteg_outer, &integ_outer, &integ_err);
// gsl_integration_qagiu(&gslfun_outer, 0, integ_abstol, integ_reltol, integ_meshdim,
// winteg_outer, &integ_outer, &integ_err);
gsl_integration_qag(&gslfun_outer, 0, muINF, integ_abstol, integ_reltol, integ_meshdim,1,
winteg_outer, &integ_outer, &integ_err);
// if(integ_outer<MINRATE) integ_outer=0.0;
return MAX(integ_outer,0.0);
}
double fermi_integrand(double x, void *p)
{
struct my_f_params * params = (struct my_f_params *)p;
double eng=x;
double ne=params->ne;
double T=params->T;
double mu=params->mu;
double vel=sqrt(2.0*eng/EMASS);
double fermi_fun=1.0/(1.0+exp((eng-mu)/BOLTZMAN/T));
double F=double_emass_pow_3_2/2.0/ne/hbar_cub/pi_sq*sqrt(eng)*fermi_fun; // DOS * f_fermi
return F;
}
double eval_fermi_integrand(double ne,double T, double mu)
{
fparams_fermi.T=T;
fparams_fermi.ne=ne;
fparams_fermi.mu=mu;
gsl_function fun;
fun.function=&fermi_integrand;
fun.params=&fparams_fermi;
double integ_err=0;
double integ_result=0;
gsl_error_handler_t *old_error_handler=gsl_set_error_handler_off ();
// int code= gsl_integration_qags (&fun, mu, muINF, integ_abstol, integ_reltol, integ_meshdim,
// winteg_fermi, &integ_result, &integ_err);
// int code=gsl_integration_qagiu(&fun, mu, integ_abstol, integ_reltol, integ_meshdim,
// winteg_fermi, &integ_result, &integ_err);
int code= gsl_integration_qag(&fun, 0, muINF, integ_abstol, integ_reltol, integ_meshdim,1,
winteg_fermi, &integ_result, &integ_err);
gsl_set_error_handler(old_error_handler); //reset the error handler
return (code==GSL_SUCCESS ? integ_result : -1); // on failure abort the RHS -> new attempt
//return integ_result;
}
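// Minimal sketch of the intended consistency check (hypothetical helper; chempot()
// is the same routine used in colrad_GetCoeffs above): with mu consistent with
// (ne,Te), the normalized integral of DOS*f_FD should be close to 1, and -1
// signals an integration failure.
static int fermi_norm_ok(double ne, double Te)
{
	double mu = chempot(ne, Te);
	double f = eval_fermi_integrand(ne, Te, mu);
	return (f > 0.0 && fabs(f - 1.0) < 0.1);
}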
double integrand_excitation(double x,void *p)
{
struct my_f_params * params = (struct my_f_params *)p;
double eng=x;
double DeltaE = params->DeltaE;
double ne=params->ne;
double T=params->T;
double mu=params->mu;
int allowed=params->allowed;
double alpha=0.05;
double beta=4.0;
double vel=sqrt(2.0*eng/EMASS);
if(vel< 1e-200) return 0.0;
double fermi_fun=1.0/(1.0+exp((eng-mu)/BOLTZMAN/T));
// if(fermi_fun < 1e-100) return 0;
double sigma=0.0;
double y=eng/DeltaE;
double Pauli=1.0-1.0/(1.0+exp((eng-DeltaE+mu)/BOLTZMAN/T));
// if(Pauli < 1e-100) return 0;
if(allowed==1)
sigma=4.0*M_PI*bohr_radius_sq*E_ion_H_sq_J* gsl_pow_2(1.0/DeltaE)*alpha*(y-1.0)/gsl_pow_2(y)*log(5*beta*y/4);
else
sigma=4.0*M_PI*bohr_radius_sq*alpha*(y-1.0)/gsl_pow_2(y);
double F=double_emass_pow_3_2/2.0/ne/hbar_cub/pi_sq*sqrt(eng)*fermi_fun;
return vel*sigma*F*Pauli;
}
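/* Note on the cross-section model above (hedged interpretation, not in the
 * original): sigma appears to follow a Drawin-type semi-empirical form,
 *   sigma = 4*pi*a0^2 * (E_H/DeltaE)^2 * alpha * (y-1)/y^2 * ln(5*beta*y/4)
 * with y = E/DeltaE, alpha = 0.05, beta = 4; the forbidden-transition branch
 * drops the (E_H/DeltaE)^2 and logarithmic factors. The Pauli factor suppresses
 * scattering into occupied final states at energy E - DeltaE. */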
double eval_excitation_integral(double ne,double T,double mu, double DeltaE, int allowed)
{
gsl_function fun;
fun.function = &integrand_excitation;
fparams_exc.T=T;
fparams_exc.ne=ne;
fparams_exc.mu=mu;
fparams_exc.DeltaE=DeltaE;
fparams_exc.allowed=allowed;
fun.params = &fparams_exc;
double integ_result=0;
double integ_err=0;
gsl_error_handler_t *old_error_handler=gsl_set_error_handler_off ();
// int code= gsl_integration_qags (&fun, DeltaE, muINF, integ_abstol, integ_reltol, integ_meshdim,
// winteg_exc, &integ_result, &integ_err);
int code= gsl_integration_qag(&fun, DeltaE, muINF, integ_abstol, integ_reltol, integ_meshdim,1,
winteg_exc, &integ_result, &integ_err);
// size_t neval;
// int code= gsl_integration_qng(&fun, DeltaE, muINF, 1e-20, 1e-3, //FAST? failed to reach tolerance with highest-order rule
// &integ_result, &integ_err, &neval);
//otherwise always becomes zero
// gsl_integration_qagiu(&fun, DeltaE, 1e-200, 1e-30, integ_meshdim,
// winteg_exc, &integ_result, &integ_err);
gsl_set_error_handler(old_error_handler); //reset the error handler
if (code != GSL_SUCCESS)
{
//print integrand
int i=0;
double dx=(muINF-DeltaE)/250;
for(i=0;i<250;i++)
{
printf("x:%.4e,integ:%.4e\n",dx*i,integrand_excitation(dx*i,&fparams_exc));
}
error("ERROR in eval_excitation_integral\n");
}
//return integ_result;
// if(myid==1) printf("integ:%.4e\n",integ_result);
// if(integ_result < MINRATE) integ_result=0.0;
return MAX(integ_result,0.0);
}
double eval_dexcitation_integral(double ne,double T,double mu, double DeltaE, int allowed)
{
gsl_function fun;
fun.function = &integrand_deexcitation;
fparams_exc.T=T;
fparams_exc.ne=ne;
fparams_exc.mu=mu;
fparams_exc.DeltaE=DeltaE;
fparams_exc.allowed=allowed;
fun.params = &fparams_exc;
double integ_result=0;
double integ_err=0;
// gsl_integration_qagiu(&fun, DeltaE, integ_abstol, integ_reltol, integ_meshdim,
// winteg_exc, &integ_result, &integ_err);
//otherwise always becomes zero
// gsl_integration_qags (&fun, DeltaE, muINF, integ_abstol, integ_reltol, integ_meshdim,
// winteg_exc, &integ_result, &integ_err);
//Aslan's variant: shift mu by DeltaE and reuse the excitation integrand
fparams_exc.mu=mu+DeltaE;
fun.function = &integrand_excitation;
// gsl_integration_qags (&fun, DeltaE, muINF, integ_abstol, integ_reltol, integ_meshdim,
// winteg_exc, &integ_result, &integ_err);
gsl_integration_qag(&fun, DeltaE, muINF, integ_abstol, integ_reltol, integ_meshdim,1,
winteg_exc, &integ_result, &integ_err);
// size_t neval;
// gsl_integration_qng(&fun, DeltaE, muINF, integ_abstol, integ_reltol, //FAST?
// &integ_result, &integ_err, &neval);
// if(integ_result < MINRATE) integ_result=0.0;
return MAX(integ_result,0.0);
}
double integrand_deexcitation(double x,void *p)
{
struct my_f_params * params = (struct my_f_params *)p;
double eng=x;
double DeltaE = params->DeltaE;
double ne=params->ne;
double T=params->T;
double mu=params->mu;
int allowed=params->allowed;
double alpha=0.05;
double beta=4.0;
double vel=sqrt(2.0*eng/EMASS);
// if(vel< 1e-100) return 0;
double fermi_fun=1.0/(1.0+exp((eng-DeltaE-mu)/BOLTZMAN/T)); //mod
// if(fermi_fun < 1e-100) return 0;
double sigma=0.0;
double y=eng/DeltaE;
double Pauli=1.0-1.0/(1.0+exp((eng+mu)/BOLTZMAN/T)); //mod
// if(Pauli<1e-100) return 0;
if(allowed==1)
sigma=4.0*M_PI*bohr_radius_sq*E_ion_H_sq_J* gsl_pow_2(1.0/DeltaE)*alpha*(y-1.0)/gsl_pow_2(y)*log(5*beta*y/4);
else
sigma=4.0*M_PI*bohr_radius_sq*alpha*(y-1.0)/gsl_pow_2(y);
double F=double_emass_pow_3_2/2.0/ne/hbar_cub/pi_sq*sqrt(eng)*fermi_fun*sqrt(eng/(eng-DeltaE)); //NOTE: the last term --> divergent at eng=DeltaE
return vel*sigma*F*Pauli;
}
|
convolution_sgemm_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_int8_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
#if NCNN_ARM82DOT && __ARM_NEON && __aarch64__ && !__ARM_FEATURE_DOTPROD
if (ncnn::cpu_support_arm_asimddp())
{
void im2col_sgemm_int8_neon_arm82dot(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
im2col_sgemm_int8_neon_arm82dot(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
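// Descriptive note (not in the original): the guard above implements runtime
// dispatch. When this translation unit is compiled without __ARM_FEATURE_DOTPROD
// but the CPU reports ASIMD dot-product support, the call is forwarded to a
// sibling of this routine compiled with the dotprod extension, so the code below
// only runs as the compile-time fallback.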
// Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
// permute
Mat tmp;
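// Descriptive note (not in the original): the im2col matrix is repacked into a
// tile-friendly layout before the GEMM. Rows are grouped by input-channel pack
// (8/4/1) and columns by output tile width; the available tile widths depend on
// the target ISA: 16/8/4/2/1 with the ARMv8.2 dot-product extension, 4/2/1 on
// plain aarch64, and 2/1 on 32-bit ARM.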
#if __ARM_NEON
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
if (inch >= 8)
{
if (size >= 16)
tmp.create(16 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size, 8u, 8, opt.workspace_allocator);
}
else if (inch >= 4)
{
if (size >= 16)
tmp.create(16 * maxk, inch / 4 + inch % 4, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8 * maxk, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
}
else
{
if (size >= 16)
tmp.create(16 * maxk, inch, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
}
#else // __ARM_FEATURE_DOTPROD
if (inch >= 8)
{
if (size >= 4)
tmp.create(4 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size, 8u, 8, opt.workspace_allocator);
}
else if (inch >= 4)
{
if (size >= 4)
tmp.create(4 * maxk, inch / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
}
else
{
if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
}
#endif // __ARM_FEATURE_DOTPROD
#else // __aarch64__
if (inch >= 8)
{
if (size >= 2)
tmp.create(2 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size, 8u, 8, opt.workspace_allocator);
}
else if (inch >= 4)
{
if (size >= 2)
tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
}
else
{
if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
}
#endif // __aarch64__
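// Illustrative note (not from the original source): the permuted tiles are
// stored widest-first, so the tmp channel holding the tile that starts at
// column i is obtained by peeling off the complete wider tiles first. For the
// dot-product layout this is the expression used throughout below:
//
//     channel(i) = i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2
//
// e.g. i = 22 with size = 23 lands in channel 1 + 0 + 1 + 1 + 0 = 3, i.e.
// after one 16-wide, one 4-wide and one 2-wide tile.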
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
int nn_size = size >> 4;
int remain_size_start = 0;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 16;
signed char* tmpptr = tmp.channel(i / 16);
int q = 0;
for (; q + 7 < inch; q += 8)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i;
const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i;
const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i;
const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"ld1 {v0.16b}, [%0] \n"
"ld1 {v1.16b}, [%1] \n"
"ld1 {v2.16b}, [%2] \n"
"ld1 {v3.16b}, [%3] \n"
"ld1 {v4.16b}, [%4] \n"
"ld1 {v5.16b}, [%5] \n"
"ld1 {v6.16b}, [%6] \n"
"ld1 {v7.16b}, [%7] \n"
"st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%8], #64 \n"
"st4 {v4.16b, v5.16b, v6.16b, v7.16b}, [%8], #64 \n"
: "=r"(img0), // %0
"=r"(img1),
"=r"(img2),
"=r"(img3),
"=r"(img4),
"=r"(img5),
"=r"(img6),
"=r"(img7),
"=r"(tmpptr) // %8
: "0"(img0),
"1"(img1),
"2"(img2),
"3"(img3),
"4"(img4),
"5"(img5),
"6"(img6),
"7"(img7),
"8"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
img0 += size;
img1 += size;
img2 += size;
img3 += size;
img4 += size;
img5 += size;
img6 += size;
img7 += size;
}
}
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"ld1 {v0.16b}, [%0] \n"
"ld1 {v1.16b}, [%1] \n"
"ld1 {v2.16b}, [%2] \n"
"ld1 {v3.16b}, [%3] \n"
"st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%4], #64 \n"
: "=r"(img0), // %0
"=r"(img1),
"=r"(img2),
"=r"(img3),
"=r"(tmpptr) // %4
: "0"(img0),
"1"(img1),
"2"(img2),
"3"(img3),
"4"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.16b}, [%0] \n"
"st1 {v0.16b}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
img0 += size;
}
}
}
remain_size_start += nn_size << 4;
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8);
int q = 0;
for (; q + 7 < inch; q += 8)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i;
const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i;
const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i;
const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"ld1 {v0.8b}, [%0] \n"
"ld1 {v1.8b}, [%1] \n"
"ld1 {v2.8b}, [%2] \n"
"ld1 {v3.8b}, [%3] \n"
"ld1 {v4.8b}, [%4] \n"
"ld1 {v5.8b}, [%5] \n"
"ld1 {v6.8b}, [%6] \n"
"ld1 {v7.8b}, [%7] \n"
"st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [%8], #32 \n"
"st4 {v4.8b, v5.8b, v6.8b, v7.8b}, [%8], #32 \n"
: "=r"(img0), // %0
"=r"(img1),
"=r"(img2),
"=r"(img3),
"=r"(img4),
"=r"(img5),
"=r"(img6),
"=r"(img7),
"=r"(tmpptr) // %8
: "0"(img0),
"1"(img1),
"2"(img2),
"3"(img3),
"4"(img4),
"5"(img5),
"6"(img6),
"7"(img7),
"8"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
img0 += size;
img1 += size;
img2 += size;
img3 += size;
img4 += size;
img5 += size;
img6 += size;
img7 += size;
}
}
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"ld1 {v0.8b}, [%0] \n"
"ld1 {v1.8b}, [%1] \n"
"ld1 {v2.8b}, [%2] \n"
"ld1 {v3.8b}, [%3] \n"
"st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [%4], #32 \n"
: "=r"(img0), // %0
"=r"(img1),
"=r"(img2),
"=r"(img3),
"=r"(tmpptr) // %4
: "0"(img0),
"1"(img1),
"2"(img2),
"3"(img3),
"4"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.8b}, [%0] \n"
"st1 {v0.8b}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
img0 += size;
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#else // __ARM_FEATURE_DOTPROD
int remain_size_start = 0;
int nn_size = (size - remain_size_start) >> 2;
#endif // __ARM_FEATURE_DOTPROD
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
#if __ARM_FEATURE_DOTPROD
signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4);
#else
signed char* tmpptr = tmp.channel(i / 4);
#endif
int q = 0;
for (; q + 7 < inch; q += 8)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i;
const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i;
const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i;
const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i;
for (int k = 0; k < maxk; k++)
{
#if __ARM_FEATURE_DOTPROD
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr += 8;
tmpptr[0] = img0[2];
tmpptr[1] = img1[2];
tmpptr[2] = img2[2];
tmpptr[3] = img3[2];
tmpptr[4] = img0[3];
tmpptr[5] = img1[3];
tmpptr[6] = img2[3];
tmpptr[7] = img3[3];
tmpptr += 8;
tmpptr[0] = img4[0];
tmpptr[1] = img5[0];
tmpptr[2] = img6[0];
tmpptr[3] = img7[0];
tmpptr[4] = img4[1];
tmpptr[5] = img5[1];
tmpptr[6] = img6[1];
tmpptr[7] = img7[1];
tmpptr += 8;
tmpptr[0] = img4[2];
tmpptr[1] = img5[2];
tmpptr[2] = img6[2];
tmpptr[3] = img7[2];
tmpptr[4] = img4[3];
tmpptr[5] = img5[3];
tmpptr[6] = img6[3];
tmpptr[7] = img7[3];
tmpptr += 8;
#else
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img4[0];
tmpptr[5] = img5[0];
tmpptr[6] = img6[0];
tmpptr[7] = img7[0];
tmpptr += 8;
tmpptr[0] = img0[1];
tmpptr[1] = img1[1];
tmpptr[2] = img2[1];
tmpptr[3] = img3[1];
tmpptr[4] = img4[1];
tmpptr[5] = img5[1];
tmpptr[6] = img6[1];
tmpptr[7] = img7[1];
tmpptr += 8;
tmpptr[0] = img0[2];
tmpptr[1] = img1[2];
tmpptr[2] = img2[2];
tmpptr[3] = img3[2];
tmpptr[4] = img4[2];
tmpptr[5] = img5[2];
tmpptr[6] = img6[2];
tmpptr[7] = img7[2];
tmpptr += 8;
tmpptr[0] = img0[3];
tmpptr[1] = img1[3];
tmpptr[2] = img2[3];
tmpptr[3] = img3[3];
tmpptr[4] = img4[3];
tmpptr[5] = img5[3];
tmpptr[6] = img6[3];
tmpptr[7] = img7[3];
tmpptr += 8;
#endif // __ARM_FEATURE_DOTPROD
img0 += size;
img1 += size;
img2 += size;
img3 += size;
img4 += size;
img5 += size;
img6 += size;
img7 += size;
}
}
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr += 8;
tmpptr[0] = img0[2];
tmpptr[1] = img1[2];
tmpptr[2] = img2[2];
tmpptr[3] = img3[2];
tmpptr[4] = img0[3];
tmpptr[5] = img1[3];
tmpptr[6] = img2[3];
tmpptr[7] = img3[3];
tmpptr += 8;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += size;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#else // __aarch64__
int remain_size_start = 0;
int nn_size = (size - remain_size_start) >> 1;
#endif // __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2);
#else
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#endif
#else
signed char* tmpptr = tmp.channel(i / 2);
#endif
int q = 0;
for (; q + 7 < inch; q += 8)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i;
const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i;
const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i;
const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i;
for (int k = 0; k < maxk; k++)
{
#if __ARM_FEATURE_DOTPROD
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr += 8;
tmpptr[0] = img4[0];
tmpptr[1] = img5[0];
tmpptr[2] = img6[0];
tmpptr[3] = img7[0];
tmpptr[4] = img4[1];
tmpptr[5] = img5[1];
tmpptr[6] = img6[1];
tmpptr[7] = img7[1];
tmpptr += 8;
#else
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img4[0];
tmpptr[5] = img5[0];
tmpptr[6] = img6[0];
tmpptr[7] = img7[0];
tmpptr += 8;
tmpptr[0] = img0[1];
tmpptr[1] = img1[1];
tmpptr[2] = img2[1];
tmpptr[3] = img3[1];
tmpptr[4] = img4[1];
tmpptr[5] = img5[1];
tmpptr[6] = img6[1];
tmpptr[7] = img7[1];
tmpptr += 8;
#endif // __ARM_FEATURE_DOTPROD
img0 += size;
img1 += size;
img2 += size;
img3 += size;
img4 += size;
img5 += size;
img6 += size;
img7 += size;
}
}
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr += 8;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr += 2;
img0 += size;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#else
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#endif
#else
signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
int q = 0;
for (; q + 7 < inch; q += 8)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i;
const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i;
const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i;
const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img4[0];
tmpptr[5] = img5[0];
tmpptr[6] = img6[0];
tmpptr[7] = img7[0];
tmpptr += 8;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
img4 += size;
img5 += size;
img6 += size;
img7 += size;
}
}
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr += 4;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
#else // __ARM_NEON
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < size; i++)
{
signed char* tmpptr = tmp.channel(i);
int q = 0;
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
#endif // __ARM_NEON
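// After this permute stage tmp holds the im2col data regrouped per tile: each
// matmul tile below streams its inputs from one tmp channel linearly, with
// values interleaved across 8 or 4 input channels where the packing branches
// above arranged them that way.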
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON
nn_outch = outch >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
int* outptr0 = top_blob.channel(p);
int* outptr1 = top_blob.channel(p + 1);
int* outptr2 = top_blob.channel(p + 2);
int* outptr3 = top_blob.channel(p + 3);
int i = 0;
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
for (; i + 15 < size; i += 16)
{
const signed char* tmpptr = tmp.channel(i / 16);
const signed char* kptr0 = kernel.channel(p / 4);
int nn = (inch / 8) * maxk;        // full 8-channel blocks
int nn4 = ((inch % 8) / 4) * maxk; // leftover 4-channel block
int nn1 = (inch % 4) * maxk;       // remaining single channels
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"cmp %w4, #0 \n"
"beq 1f \n"
"ld1 {v8.16b}, [%8], #16 \n" // _w0123_l
"ld1 {v0.16b}, [%7], #16 \n" // _val0123_l
"0: \n"
"ld1 {v1.16b}, [%7], #16 \n" // _val4567_l
"sdot v16.4s, v8.16b, v0.4b[0] \n"
"sdot v17.4s, v8.16b, v0.4b[1] \n"
"sdot v18.4s, v8.16b, v0.4b[2] \n"
"sdot v19.4s, v8.16b, v0.4b[3] \n"
"ld1 {v2.16b}, [%7], #16 \n" // _val891011_l
"sdot v20.4s, v8.16b, v1.4b[0] \n"
"sdot v21.4s, v8.16b, v1.4b[1] \n"
"sdot v22.4s, v8.16b, v1.4b[2] \n"
"sdot v23.4s, v8.16b, v1.4b[3] \n"
"ld1 {v3.16b}, [%7], #16 \n" // _val12131415_l
"sdot v24.4s, v8.16b, v2.4b[0] \n"
"sdot v25.4s, v8.16b, v2.4b[1] \n"
"ld1 {v9.16b}, [%8], #16 \n" // _w0123_h
"sdot v26.4s, v8.16b, v2.4b[2] \n"
"sdot v27.4s, v8.16b, v2.4b[3] \n"
"ld1 {v4.16b}, [%7], #16 \n" // _val0123_h
"sdot v28.4s, v8.16b, v3.4b[0] \n"
"sdot v29.4s, v8.16b, v3.4b[1] \n"
"sdot v30.4s, v8.16b, v3.4b[2] \n"
"sdot v31.4s, v8.16b, v3.4b[3] \n"
"ld1 {v5.16b}, [%7], #16 \n" // _val4567_h
"sdot v16.4s, v9.16b, v4.4b[0] \n"
"sdot v17.4s, v9.16b, v4.4b[1] \n"
"sdot v18.4s, v9.16b, v4.4b[2] \n"
"sdot v19.4s, v9.16b, v4.4b[3] \n"
"ld1 {v6.16b}, [%7], #16 \n" // _val891011_h
"sdot v20.4s, v9.16b, v5.4b[0] \n"
"sdot v21.4s, v9.16b, v5.4b[1] \n"
"sdot v22.4s, v9.16b, v5.4b[2] \n"
"sdot v23.4s, v9.16b, v5.4b[3] \n"
"ld1 {v7.16b}, [%7], #16 \n" // _val12131415_h
"sdot v24.4s, v9.16b, v6.4b[0] \n"
"sdot v25.4s, v9.16b, v6.4b[1] \n"
"ld1 {v8.16b}, [%8], #16 \n" // _w0123_l
"sdot v26.4s, v9.16b, v6.4b[2] \n"
"sdot v27.4s, v9.16b, v6.4b[3] \n"
"ld1 {v0.16b}, [%7], #16 \n" // _val0123_l
"sdot v28.4s, v9.16b, v7.4b[0] \n"
"sdot v29.4s, v9.16b, v7.4b[1] \n"
"subs %w4, %w4, #1 \n"
"sdot v30.4s, v9.16b, v7.4b[2] \n"
"sdot v31.4s, v9.16b, v7.4b[3] \n"
"bne 0b \n"
"sub %7, %7, #16 \n"
"sub %8, %8, #16 \n"
"1: \n"
"cmp %w5, #0 \n"
"beq 3f \n"
"2: \n"
"ld1 {v8.16b}, [%8], #16 \n"
"ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%7], #64 \n"
"sdot v16.4s, v8.16b, v0.4b[0] \n"
"sdot v17.4s, v8.16b, v0.4b[1] \n"
"sdot v18.4s, v8.16b, v0.4b[2] \n"
"sdot v19.4s, v8.16b, v0.4b[3] \n"
"sdot v20.4s, v8.16b, v1.4b[0] \n"
"sdot v21.4s, v8.16b, v1.4b[1] \n"
"sdot v22.4s, v8.16b, v1.4b[2] \n"
"sdot v23.4s, v8.16b, v1.4b[3] \n"
"sdot v24.4s, v8.16b, v2.4b[0] \n"
"sdot v25.4s, v8.16b, v2.4b[1] \n"
"sdot v26.4s, v8.16b, v2.4b[2] \n"
"sdot v27.4s, v8.16b, v2.4b[3] \n"
"sdot v28.4s, v8.16b, v3.4b[0] \n"
"sdot v29.4s, v8.16b, v3.4b[1] \n"
"subs %w5, %w5, #1 \n"
"sdot v30.4s, v8.16b, v3.4b[2] \n"
"sdot v31.4s, v8.16b, v3.4b[3] \n"
"bne 2b \n"
"3: \n"
"lsr w4, %w6, #2 \n" // w4 = nn1 >> 2
"cmp w4, #0 \n"
"beq 5f \n"
"4: \n"
"ld1 {v8.8b, v9.8b}, [%8], #16 \n"
"ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%7], #64 \n"
"uzp1 v10.8b, v8.8b, v9.8b \n"
"uzp2 v11.8b, v8.8b, v9.8b \n"
"uzp1 v4.16b, v0.16b, v1.16b \n"
"uzp2 v5.16b, v0.16b, v1.16b \n"
"uzp1 v6.16b, v2.16b, v3.16b \n"
"uzp2 v7.16b, v2.16b, v3.16b \n"
"uzp1 v8.8b, v10.8b, v11.8b \n"
"uzp2 v9.8b, v10.8b, v11.8b \n"
"uzp1 v0.16b, v4.16b, v5.16b \n" // 0 1 4 5
"uzp2 v1.16b, v4.16b, v5.16b \n" // 8 9 c d
"mov v8.d[1], v9.d[0] \n" // _w
"uzp1 v2.16b, v6.16b, v7.16b \n" // 2 3 6 7
"uzp2 v3.16b, v6.16b, v7.16b \n" // a b e f
"sdot v16.4s, v8.16b, v0.4b[0] \n"
"sdot v17.4s, v8.16b, v0.4b[1] \n"
"sdot v18.4s, v8.16b, v2.4b[0] \n"
"sdot v19.4s, v8.16b, v2.4b[1] \n"
"sdot v20.4s, v8.16b, v0.4b[2] \n"
"sdot v21.4s, v8.16b, v0.4b[3] \n"
"sdot v22.4s, v8.16b, v2.4b[2] \n"
"sdot v23.4s, v8.16b, v2.4b[3] \n"
"sdot v24.4s, v8.16b, v1.4b[0] \n"
"sdot v25.4s, v8.16b, v1.4b[1] \n"
"sdot v26.4s, v8.16b, v3.4b[0] \n"
"sdot v27.4s, v8.16b, v3.4b[1] \n"
"sdot v28.4s, v8.16b, v1.4b[2] \n"
"sdot v29.4s, v8.16b, v1.4b[3] \n"
"sdot v30.4s, v8.16b, v3.4b[2] \n"
"sdot v31.4s, v8.16b, v3.4b[3] \n"
"subs w4, w4, #1 \n"
"bne 4b \n"
"5: \n"
"and w4, %w6, #3 \n" // w4 = remain = nn1 & 3
"cmp w4, #0 \n" // w4 > 0
"beq 7f \n"
"6: \n"
"ld1 {v1.8b}, [%8] \n"
"ld1 {v0.16b}, [%7] \n"
"sshll v1.8h, v1.8b, #0 \n"
"sshll v2.8h, v0.8b, #0 \n"
"sshll2 v3.8h, v0.16b, #0 \n"
"smlal v16.4s, v1.4h, v2.h[0] \n"
"smlal v17.4s, v1.4h, v2.h[1] \n"
"smlal v18.4s, v1.4h, v2.h[2] \n"
"smlal v19.4s, v1.4h, v2.h[3] \n"
"smlal v20.4s, v1.4h, v2.h[4] \n"
"smlal v21.4s, v1.4h, v2.h[5] \n"
"smlal v22.4s, v1.4h, v2.h[6] \n"
"smlal v23.4s, v1.4h, v2.h[7] \n"
"smlal v24.4s, v1.4h, v3.h[0] \n"
"smlal v25.4s, v1.4h, v3.h[1] \n"
"smlal v26.4s, v1.4h, v3.h[2] \n"
"smlal v27.4s, v1.4h, v3.h[3] \n"
"smlal v28.4s, v1.4h, v3.h[4] \n"
"smlal v29.4s, v1.4h, v3.h[5] \n"
"smlal v30.4s, v1.4h, v3.h[6] \n"
"smlal v31.4s, v1.4h, v3.h[7] \n"
"add %7, %7, #16 \n"
"add %8, %8, #4 \n"
"subs w4, w4, #1 \n"
"bne 6b \n"
"7: \n"
// transpose 4x16
"trn1 v0.4s, v16.4s, v17.4s \n"
"trn2 v1.4s, v16.4s, v17.4s \n"
"trn1 v2.4s, v18.4s, v19.4s \n"
"trn2 v3.4s, v18.4s, v19.4s \n"
"trn1 v4.4s, v20.4s, v21.4s \n"
"trn2 v5.4s, v20.4s, v21.4s \n"
"trn1 v6.4s, v22.4s, v23.4s \n"
"trn2 v7.4s, v22.4s, v23.4s \n"
"trn1 v8.4s, v24.4s, v25.4s \n"
"trn2 v9.4s, v24.4s, v25.4s \n"
"trn1 v10.4s, v26.4s, v27.4s \n"
"trn2 v11.4s, v26.4s, v27.4s \n"
"trn1 v12.4s, v28.4s, v29.4s \n"
"trn2 v13.4s, v28.4s, v29.4s \n"
"trn1 v14.4s, v30.4s, v31.4s \n"
"trn2 v15.4s, v30.4s, v31.4s \n"
"trn1 v16.2d, v0.2d, v2.2d \n"
"trn2 v24.2d, v0.2d, v2.2d \n"
"trn1 v20.2d, v1.2d, v3.2d \n"
"trn2 v28.2d, v1.2d, v3.2d \n"
"trn1 v17.2d, v4.2d, v6.2d \n"
"trn2 v25.2d, v4.2d, v6.2d \n"
"trn1 v21.2d, v5.2d, v7.2d \n"
"trn2 v29.2d, v5.2d, v7.2d \n"
"trn1 v18.2d, v8.2d, v10.2d \n"
"trn2 v26.2d, v8.2d, v10.2d \n"
"trn1 v22.2d, v9.2d, v11.2d \n"
"trn2 v30.2d, v9.2d, v11.2d \n"
"trn1 v19.2d, v12.2d, v14.2d \n"
"trn2 v27.2d, v12.2d, v14.2d \n"
"trn1 v23.2d, v13.2d, v15.2d \n"
"trn2 v31.2d, v13.2d, v15.2d \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%3], #64 \n"
: "=r"(outptr0),
"=r"(outptr1),
"=r"(outptr2),
"=r"(outptr3),
"=r"(nn),
"=r"(nn4),
"=r"(nn1),
"=r"(tmpptr),
"=r"(kptr0)
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(nn),
"5"(nn4),
"6"(nn1),
"7"(tmpptr),
"8"(kptr0)
: "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < size; i += 8)
{
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8);
const signed char* kptr0 = kernel.channel(p / 4);
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int32x4_t _sum4 = vdupq_n_s32(0);
int32x4_t _sum5 = vdupq_n_s32(0);
int32x4_t _sum6 = vdupq_n_s32(0);
int32x4_t _sum7 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int8x16_t _val0123_l = vld1q_s8(tmpptr);
int8x16_t _val4567_l = vld1q_s8(tmpptr + 16);
int8x16_t _w0123_l = vld1q_s8(kptr0);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3);
_sum4 = vdotq_laneq_s32(_sum4, _w0123_l, _val4567_l, 0);
_sum5 = vdotq_laneq_s32(_sum5, _w0123_l, _val4567_l, 1);
_sum6 = vdotq_laneq_s32(_sum6, _w0123_l, _val4567_l, 2);
_sum7 = vdotq_laneq_s32(_sum7, _w0123_l, _val4567_l, 3);
int8x16_t _val0123_h = vld1q_s8(tmpptr + 32);
int8x16_t _val4567_h = vld1q_s8(tmpptr + 48);
int8x16_t _w0123_h = vld1q_s8(kptr0 + 16);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3);
_sum4 = vdotq_laneq_s32(_sum4, _w0123_h, _val4567_h, 0);
_sum5 = vdotq_laneq_s32(_sum5, _w0123_h, _val4567_h, 1);
_sum6 = vdotq_laneq_s32(_sum6, _w0123_h, _val4567_h, 2);
_sum7 = vdotq_laneq_s32(_sum7, _w0123_h, _val4567_h, 3);
tmpptr += 64;
kptr0 += 32;
}
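// Reference semantics of the vdotq_laneq_s32 calls above, as a commented
// scalar sketch: each call reads one 32-bit lane of the value vector as four
// int8s and dot-multiplies it against each int8 quadruplet of the weight
// vector, accumulating into the matching int32 lane:
//
//   for (int o = 0; o < 4; o++)      // output channel
//       for (int k = 0; k < 4; k++)  // packed input channel
//           acc[o] += (int32_t)w[o * 4 + k] * v[lane * 4 + k];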
for (int j = 0; j < nn4; j++)
{
int8x16_t _val0123 = vld1q_s8(tmpptr);
int8x16_t _val4567 = vld1q_s8(tmpptr + 16);
int8x16_t _w0 = vld1q_s8(kptr0);
_sum0 = vdotq_laneq_s32(_sum0, _w0, _val0123, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0, _val0123, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0, _val0123, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0, _val0123, 3);
_sum4 = vdotq_laneq_s32(_sum4, _w0, _val4567, 0);
_sum5 = vdotq_laneq_s32(_sum5, _w0, _val4567, 1);
_sum6 = vdotq_laneq_s32(_sum6, _w0, _val4567, 2);
_sum7 = vdotq_laneq_s32(_sum7, _w0, _val4567, 3);
tmpptr += 32;
kptr0 += 16;
}
int j = 0;
for (; j + 3 < nn1; j += 4)
{
int8x8x4_t _val4 = vld4_s8(tmpptr);
int8x8x2_t _val0145 = vuzp_s8(_val4.val[0], _val4.val[1]);
int8x8x2_t _val2367 = vuzp_s8(_val4.val[2], _val4.val[3]);
int8x16_t _val0123 = vcombine_s8(_val0145.val[0], _val2367.val[0]);
int8x16_t _val4567 = vcombine_s8(_val0145.val[1], _val2367.val[1]);
int8x16_t _w = vld1q_s8(kptr0);
int8x8x2_t _w01 = vuzp_s8(vget_low_s8(_w), vget_high_s8(_w));
int8x8x2_t _w0123 = vuzp_s8(_w01.val[0], _w01.val[1]);
int8x16_t _w0123f = vcombine_s8(_w0123.val[0], _w0123.val[1]);
_sum0 = vdotq_laneq_s32(_sum0, _w0123f, _val0123, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123f, _val0123, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123f, _val0123, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123f, _val0123, 3);
_sum4 = vdotq_laneq_s32(_sum4, _w0123f, _val4567, 0);
_sum5 = vdotq_laneq_s32(_sum5, _w0123f, _val4567, 1);
_sum6 = vdotq_laneq_s32(_sum6, _w0123f, _val4567, 2);
_sum7 = vdotq_laneq_s32(_sum7, _w0123f, _val4567, 3);
tmpptr += 32;
kptr0 += 16;
}
for (; j < nn1; j++)
{
int16x4_t _val0 = vdup_n_s16(tmpptr[0]);
int16x4_t _val1 = vdup_n_s16(tmpptr[1]);
int16x4_t _val2 = vdup_n_s16(tmpptr[2]);
int16x4_t _val3 = vdup_n_s16(tmpptr[3]);
int16x4_t _val4 = vdup_n_s16(tmpptr[4]);
int16x4_t _val5 = vdup_n_s16(tmpptr[5]);
int16x4_t _val6 = vdup_n_s16(tmpptr[6]);
int16x4_t _val7 = vdup_n_s16(tmpptr[7]);
int16x4_t _w0123 = vdup_n_s16(0); // initialize before the lane inserts below
_w0123 = vset_lane_s16(kptr0[0], _w0123, 0);
_w0123 = vset_lane_s16(kptr0[1], _w0123, 1);
_w0123 = vset_lane_s16(kptr0[2], _w0123, 2);
_w0123 = vset_lane_s16(kptr0[3], _w0123, 3);
_sum0 = vmlal_s16(_sum0, _val0, _w0123);
_sum1 = vmlal_s16(_sum1, _val1, _w0123);
_sum2 = vmlal_s16(_sum2, _val2, _w0123);
_sum3 = vmlal_s16(_sum3, _val3, _w0123);
_sum4 = vmlal_s16(_sum4, _val4, _w0123);
_sum5 = vmlal_s16(_sum5, _val5, _w0123);
_sum6 = vmlal_s16(_sum6, _val6, _w0123);
_sum7 = vmlal_s16(_sum7, _val7, _w0123);
tmpptr += 8;
kptr0 += 4;
}
// transpose 4x8
int32x4x2_t _s01 = vtrnq_s32(_sum0, _sum1);
int32x4x2_t _s23 = vtrnq_s32(_sum2, _sum3);
int32x4x2_t _s45 = vtrnq_s32(_sum4, _sum5);
int32x4x2_t _s67 = vtrnq_s32(_sum6, _sum7);
_sum0 = vcombine_s32(vget_low_s32(_s01.val[0]), vget_low_s32(_s23.val[0]));
_sum1 = vcombine_s32(vget_low_s32(_s01.val[1]), vget_low_s32(_s23.val[1]));
_sum2 = vcombine_s32(vget_high_s32(_s01.val[0]), vget_high_s32(_s23.val[0]));
_sum3 = vcombine_s32(vget_high_s32(_s01.val[1]), vget_high_s32(_s23.val[1]));
_sum4 = vcombine_s32(vget_low_s32(_s45.val[0]), vget_low_s32(_s67.val[0]));
_sum5 = vcombine_s32(vget_low_s32(_s45.val[1]), vget_low_s32(_s67.val[1]));
_sum6 = vcombine_s32(vget_high_s32(_s45.val[0]), vget_high_s32(_s67.val[0]));
_sum7 = vcombine_s32(vget_high_s32(_s45.val[1]), vget_high_s32(_s67.val[1]));
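// After the 4x8 transpose each _sumN holds four consecutive output columns of
// a single output channel, so the stores below write contiguous runs into
// outptr0..outptr3.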
vst1q_s32(outptr0, _sum0);
vst1q_s32(outptr1, _sum1);
vst1q_s32(outptr2, _sum2);
vst1q_s32(outptr3, _sum3);
vst1q_s32(outptr0 + 4, _sum4);
vst1q_s32(outptr1 + 4, _sum5);
vst1q_s32(outptr2 + 4, _sum6);
vst1q_s32(outptr3 + 4, _sum7);
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
}
#endif // __ARM_FEATURE_DOTPROD
for (; i + 3 < size; i += 4)
{
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4);
#else
const signed char* tmpptr = tmp.channel(i / 4);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
#if __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int8x16_t _val0123_l = vld1q_s8(tmpptr);
int8x16_t _w0123_l = vld1q_s8(kptr0);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3);
int8x16_t _val0123_h = vld1q_s8(tmpptr + 16);
int8x16_t _w0123_h = vld1q_s8(kptr0 + 16);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3);
tmpptr += 32;
kptr0 += 32;
}
for (int j = 0; j < nn4; j++)
{
int8x16_t _val0123 = vld1q_s8(tmpptr);
int8x16_t _w0 = vld1q_s8(kptr0);
_sum0 = vdotq_laneq_s32(_sum0, _w0, _val0123, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0, _val0123, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0, _val0123, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0, _val0123, 3);
tmpptr += 16;
kptr0 += 16;
}
int j = 0;
for (; j + 3 < nn1; j += 4)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x8x2_t _val01 = vuzp_s8(vget_low_s8(_val), vget_high_s8(_val));
int8x8x2_t _val0123 = vuzp_s8(_val01.val[0], _val01.val[1]);
int8x16_t _val0123f = vcombine_s8(_val0123.val[0], _val0123.val[1]);
int8x16_t _w = vld1q_s8(kptr0);
int8x8x2_t _w01 = vuzp_s8(vget_low_s8(_w), vget_high_s8(_w));
int8x8x2_t _w0123 = vuzp_s8(_w01.val[0], _w01.val[1]);
int8x16_t _w0123f = vcombine_s8(_w0123.val[0], _w0123.val[1]);
_sum0 = vdotq_laneq_s32(_sum0, _w0123f, _val0123f, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123f, _val0123f, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123f, _val0123f, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123f, _val0123f, 3);
tmpptr += 16;
kptr0 += 16;
}
for (; j < nn1; j++)
{
int16x4_t _val0 = vdup_n_s16(tmpptr[0]);
int16x4_t _val1 = vdup_n_s16(tmpptr[1]);
int16x4_t _val2 = vdup_n_s16(tmpptr[2]);
int16x4_t _val3 = vdup_n_s16(tmpptr[3]);
int16x4_t _w0123 = vdup_n_s16(0); // initialize before the lane inserts below
_w0123 = vset_lane_s16(kptr0[0], _w0123, 0);
_w0123 = vset_lane_s16(kptr0[1], _w0123, 1);
_w0123 = vset_lane_s16(kptr0[2], _w0123, 2);
_w0123 = vset_lane_s16(kptr0[3], _w0123, 3);
_sum0 = vmlal_s16(_sum0, _val0, _w0123);
_sum1 = vmlal_s16(_sum1, _val1, _w0123);
_sum2 = vmlal_s16(_sum2, _val2, _w0123);
_sum3 = vmlal_s16(_sum3, _val3, _w0123);
tmpptr += 4;
kptr0 += 4;
}
// transpose 4x4
int32x4x2_t _s01 = vtrnq_s32(_sum0, _sum1);
int32x4x2_t _s23 = vtrnq_s32(_sum2, _sum3);
_sum0 = vcombine_s32(vget_low_s32(_s01.val[0]), vget_low_s32(_s23.val[0]));
_sum1 = vcombine_s32(vget_low_s32(_s01.val[1]), vget_low_s32(_s23.val[1]));
_sum2 = vcombine_s32(vget_high_s32(_s01.val[0]), vget_high_s32(_s23.val[0]));
_sum3 = vcombine_s32(vget_high_s32(_s01.val[1]), vget_high_s32(_s23.val[1]));
vst1q_s32(outptr0, _sum0);
vst1q_s32(outptr1, _sum1);
vst1q_s32(outptr2, _sum2);
vst1q_s32(outptr3, _sum3);
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
#else // __ARM_FEATURE_DOTPROD
asm volatile(
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"cmp %w4, #0 \n"
"beq 3f \n"
"eor v4.16b, v4.16b, v4.16b \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"prfm pldl1keep, [%7, #128] \n"
"prfm pldl1keep, [%8, #256] \n"
"lsr w4, %w4, #1 \n" // w4 = nn >> 1
"cmp w4, #0 \n"
"beq 1f \n"
"prfm pldl1keep, [%8, #512] \n"
"add x5, %7, #16 \n"
"prfm pldl1keep, [x5, #128] \n"
"ld1 {v16.16b}, [%7] \n" // val L H
"ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%8], #64 \n"
"add %7, %7, #32 \n"
"ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L
"ld1 {v18.16b}, [%7] \n"
"add %7, %7, #32 \n"
"0: \n"
"smull v24.8h, v16.8b, v20.8b \n"
"prfm pldl1keep, [%8, #256] \n"
"smull2 v25.8h, v17.16b, v20.16b \n"
"prfm pldl1keep, [%8, #512] \n"
"smull v26.8h, v16.8b, v21.8b \n"
"subs w4, w4, #1 \n"
"smull2 v27.8h, v17.16b, v21.16b \n"
"ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L
"smlal v24.8h, v18.8b, v22.8b \n"
"smlal2 v25.8h, v19.16b, v22.16b \n"
"smlal v26.8h, v18.8b, v23.8b \n"
"smlal2 v27.8h, v19.16b, v23.16b \n"
"smull2 v29.8h, v16.16b, v20.16b \n"
"sadalp v0.4s, v24.8h \n"
"smull v28.8h, v17.8b, v20.8b \n"
"sadalp v1.4s, v25.8h \n"
"smull2 v31.8h, v16.16b, v21.16b \n"
"ld1 {v16.16b}, [x5] \n" // val L H
"smull v30.8h, v17.8b, v21.8b \n"
"add x5, x5, #32 \n"
"smlal2 v29.8h, v18.16b, v22.16b \n"
"sadalp v2.4s, v26.8h \n"
"smlal v28.8h, v19.8b, v22.8b \n"
"sadalp v3.4s, v27.8h \n"
"smlal2 v31.8h, v18.16b, v23.16b \n"
"ld1 {v18.16b}, [x5] \n"
"smlal v30.8h, v19.8b, v23.8b \n"
"ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L
"smull v24.8h, v16.8b, v20.8b \n"
"add x5, x5, #32 \n"
"smull2 v25.8h, v17.16b, v20.16b \n"
"prfm pldl1keep, [x5, #128] \n"
"smull v26.8h, v16.8b, v21.8b \n"
"prfm pldl1keep, [x5, #384] \n"
"smull2 v27.8h, v17.16b, v21.16b \n"
"ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L
"smlal v24.8h, v18.8b, v22.8b \n"
"sadalp v5.4s, v29.8h \n"
"smlal2 v25.8h, v19.16b, v22.16b \n"
"sadalp v4.4s, v28.8h \n"
"smlal v26.8h, v18.8b, v23.8b \n"
"sadalp v7.4s, v31.8h \n"
"smlal2 v27.8h, v19.16b, v23.16b \n"
"sadalp v6.4s, v30.8h \n"
"smull2 v29.8h, v16.16b, v20.16b \n"
"sadalp v8.4s, v24.8h \n"
"smull v28.8h, v17.8b, v20.8b \n"
"sadalp v9.4s, v25.8h \n"
"smull2 v31.8h, v16.16b, v21.16b \n"
"ld1 {v16.16b}, [%7] \n" // val L H
"smull v30.8h, v17.8b, v21.8b \n"
"add %7, %7, #32 \n"
"smlal2 v29.8h, v18.16b, v22.16b \n"
"sadalp v10.4s, v26.8h \n"
"smlal v28.8h, v19.8b, v22.8b \n"
"sadalp v11.4s, v27.8h \n"
"smlal2 v31.8h, v18.16b, v23.16b \n"
"ld1 {v18.16b}, [%7] \n"
"smlal v30.8h, v19.8b, v23.8b \n"
"add %7, %7, #32 \n"
"ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%8], #64 \n"
"sadalp v13.4s, v29.8h \n"
"prfm pldl1keep, [%7, #128] \n"
"sadalp v12.4s, v28.8h \n"
"prfm pldl1keep, [%7, #384] \n"
"sadalp v15.4s, v31.8h \n"
"ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L
"sadalp v14.4s, v30.8h \n"
"bne 0b \n"
"sub %7, %7, #64 \n"
"sub %8, %8, #64 \n"
"1: \n"
"and w4, %w4, #1 \n" // w4 = remain = nn & 1
"cmp w4, #0 \n" // w4 > 0
"beq 2f \n"
"ld1 {v16.8b, v17.8b}, [%7], #16 \n"
"ld1 {v20.8b, v21.8b, v22.8b, v23.8b}, [%8], #32 \n"
"smull v24.8h, v16.8b, v20.8b \n"
"smull v25.8h, v16.8b, v21.8b \n"
"smull v26.8h, v16.8b, v22.8b \n"
"ld1 {v18.8b, v19.8b}, [%7], #16 \n"
"smull v27.8h, v16.8b, v23.8b \n"
"sadalp v0.4s, v24.8h \n"
"smull v28.8h, v17.8b, v20.8b \n"
"sadalp v1.4s, v25.8h \n"
"smull v29.8h, v17.8b, v21.8b \n"
"sadalp v2.4s, v26.8h \n"
"smull v30.8h, v17.8b, v22.8b \n"
"sadalp v3.4s, v27.8h \n"
"smull v31.8h, v17.8b, v23.8b \n"
"sadalp v4.4s, v28.8h \n"
"smull v24.8h, v18.8b, v20.8b \n"
"sadalp v5.4s, v29.8h \n"
"smull v25.8h, v18.8b, v21.8b \n"
"sadalp v6.4s, v30.8h \n"
"smull v26.8h, v18.8b, v22.8b \n"
"sadalp v7.4s, v31.8h \n"
"smull v27.8h, v18.8b, v23.8b \n"
"sadalp v8.4s, v24.8h \n"
"smull v28.8h, v19.8b, v20.8b \n"
"sadalp v9.4s, v25.8h \n"
"smull v29.8h, v19.8b, v21.8b \n"
"sadalp v10.4s, v26.8h \n"
"smull v30.8h, v19.8b, v22.8b \n"
"sadalp v11.4s, v27.8h \n"
"smull v31.8h, v19.8b, v23.8b \n"
"sadalp v12.4s, v28.8h \n"
"sadalp v13.4s, v29.8h \n"
"sadalp v14.4s, v30.8h \n"
"sadalp v15.4s, v31.8h \n"
"2: \n"
"addp v0.4s, v0.4s, v1.4s \n"
"addp v2.4s, v2.4s, v3.4s \n"
"addp v4.4s, v4.4s, v5.4s \n"
"addp v6.4s, v6.4s, v7.4s \n"
"addp v8.4s, v8.4s, v9.4s \n"
"addp v10.4s, v10.4s, v11.4s \n"
"addp v12.4s, v12.4s, v13.4s \n"
"addp v14.4s, v14.4s, v15.4s \n"
"addp v0.4s, v0.4s, v2.4s \n"
"addp v1.4s, v4.4s, v6.4s \n"
"addp v2.4s, v8.4s, v10.4s \n"
"addp v3.4s, v12.4s, v14.4s \n"
"3: \n"
"cmp %w5, #0 \n"
"beq 7f \n"
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"lsr w4, %w5, #1 \n" // w4 = nn4 >> 1
"cmp w4, #0 \n"
"beq 5f \n"
"4: \n"
"ld1 {v16.8b, v17.8b}, [%7], #16 \n"
"ld1 {v22.8b, v23.8b}, [%8], #16 \n"
"zip1 v18.2s, v16.2s, v16.2s \n" // _val00
"zip2 v19.2s, v16.2s, v16.2s \n" // _val11
"smull v24.8h, v18.8b, v22.8b \n"
"smull v25.8h, v18.8b, v23.8b \n"
"zip1 v20.2s, v17.2s, v17.2s \n" // _val22
"smull v26.8h, v19.8b, v22.8b \n"
"smull v27.8h, v19.8b, v23.8b \n"
"zip2 v21.2s, v17.2s, v17.2s \n" // _val33
"smull v28.8h, v20.8b, v22.8b \n"
"smull v29.8h, v20.8b, v23.8b \n"
"ld1 {v16.8b, v17.8b}, [%7], #16 \n"
"smull v30.8h, v21.8b, v22.8b \n"
"smull v31.8h, v21.8b, v23.8b \n"
"ld1 {v22.8b, v23.8b}, [%8], #16 \n"
"zip1 v18.2s, v16.2s, v16.2s \n" // _val44
"zip2 v19.2s, v16.2s, v16.2s \n" // _val55
"smlal v24.8h, v18.8b, v22.8b \n"
"smlal v25.8h, v18.8b, v23.8b \n"
"zip1 v20.2s, v17.2s, v17.2s \n" // _val66
"smlal v26.8h, v19.8b, v22.8b \n"
"smlal v27.8h, v19.8b, v23.8b \n"
"zip2 v21.2s, v17.2s, v17.2s \n" // _val77
"sadalp v8.4s, v24.8h \n"
"smlal v28.8h, v20.8b, v22.8b \n"
"sadalp v9.4s, v25.8h \n"
"smlal v29.8h, v20.8b, v23.8b \n"
"sadalp v10.4s, v26.8h \n"
"smlal v30.8h, v21.8b, v22.8b \n"
"sadalp v11.4s, v27.8h \n"
"smlal v31.8h, v21.8b, v23.8b \n"
"sadalp v12.4s, v28.8h \n"
"sadalp v13.4s, v29.8h \n"
"subs w4, w4, #1 \n"
"sadalp v14.4s, v30.8h \n"
"sadalp v15.4s, v31.8h \n"
"bne 4b \n"
"5: \n"
"and w4, %w5, #1 \n" // w4 = remain = nn4 & 1
"cmp w4, #0 \n" // w4 > 0
"beq 6f \n"
"ld1 {v16.8b, v17.8b}, [%7], #16 \n"
"ld1 {v22.8b, v23.8b}, [%8], #16 \n"
"zip1 v18.2s, v16.2s, v16.2s \n" // _val00
"zip2 v19.2s, v16.2s, v16.2s \n" // _val11
"smull v24.8h, v18.8b, v22.8b \n"
"smull v25.8h, v18.8b, v23.8b \n"
"zip1 v20.2s, v17.2s, v17.2s \n" // _val22
"smull v26.8h, v19.8b, v22.8b \n"
"smull v27.8h, v19.8b, v23.8b \n"
"zip2 v21.2s, v17.2s, v17.2s \n" // _val33
"sadalp v8.4s, v24.8h \n"
"smull v28.8h, v20.8b, v22.8b \n"
"sadalp v9.4s, v25.8h \n"
"smull v29.8h, v20.8b, v23.8b \n"
"sadalp v10.4s, v26.8h \n"
"smull v30.8h, v21.8b, v22.8b \n"
"sadalp v11.4s, v27.8h \n"
"smull v31.8h, v21.8b, v23.8b \n"
"sadalp v12.4s, v28.8h \n"
"sadalp v13.4s, v29.8h \n"
"sadalp v14.4s, v30.8h \n"
"sadalp v15.4s, v31.8h \n"
"6: \n"
"addp v8.4s, v8.4s, v9.4s \n"
"addp v10.4s, v10.4s, v11.4s \n"
"addp v12.4s, v12.4s, v13.4s \n"
"addp v14.4s, v14.4s, v15.4s \n"
"add v0.4s, v0.4s, v8.4s \n"
"add v1.4s, v1.4s, v10.4s \n"
"add v2.4s, v2.4s, v12.4s \n"
"add v3.4s, v3.4s, v14.4s \n"
"7: \n"
"lsr w4, %w6, #2 \n" // w4 = nn1 >> 2
"cmp w4, #0 \n"
"beq 9f \n"
"8: \n"
"ld1 {v8.16b}, [%7], #16 \n"
"ld1 {v9.16b}, [%8], #16 \n"
"sshll v4.8h, v8.8b, #0 \n"
"sshll2 v5.8h, v8.16b, #0 \n"
"sshll v6.8h, v9.8b, #0 \n"
"sshll2 v7.8h, v9.16b, #0 \n"
"smlal v0.4s, v6.4h, v4.h[0] \n"
"smlal v1.4s, v6.4h, v4.h[1] \n"
"smlal v2.4s, v6.4h, v4.h[2] \n"
"smlal v3.4s, v6.4h, v4.h[3] \n"
"smlal2 v0.4s, v6.8h, v4.h[4] \n"
"smlal2 v1.4s, v6.8h, v4.h[5] \n"
"smlal2 v2.4s, v6.8h, v4.h[6] \n"
"smlal2 v3.4s, v6.8h, v4.h[7] \n"
"smlal v0.4s, v7.4h, v5.h[0] \n"
"smlal v1.4s, v7.4h, v5.h[1] \n"
"smlal v2.4s, v7.4h, v5.h[2] \n"
"smlal v3.4s, v7.4h, v5.h[3] \n"
"smlal2 v0.4s, v7.8h, v5.h[4] \n"
"smlal2 v1.4s, v7.8h, v5.h[5] \n"
"smlal2 v2.4s, v7.8h, v5.h[6] \n"
"smlal2 v3.4s, v7.8h, v5.h[7] \n"
"subs w4, w4, #1 \n"
"bne 8b \n"
"9: \n"
"and w4, %w6, #3 \n" // w4 = nn1 & 3
"cmp w4, #0 \n" // w4 > 0
"beq 11f \n"
"10: \n"
"ld1 {v4.8b}, [%7] \n"
"ld1 {v6.8b}, [%8] \n"
"sshll v4.8h, v4.8b, #0 \n"
"sshll v6.8h, v6.8b, #0 \n"
"smlal v0.4s, v6.4h, v4.h[0] \n"
"smlal v1.4s, v6.4h, v4.h[1] \n"
"smlal v2.4s, v6.4h, v4.h[2] \n"
"smlal v3.4s, v6.4h, v4.h[3] \n"
"add %7, %7, #4 \n"
"add %8, %8, #4 \n"
"subs w4, w4, #1 \n"
"bne 10b \n"
"11: \n"
// transpose 4x4
"trn1 v4.4s, v0.4s, v1.4s \n"
"trn2 v5.4s, v0.4s, v1.4s \n"
"trn1 v6.4s, v2.4s, v3.4s \n"
"trn2 v7.4s, v2.4s, v3.4s \n"
"trn1 v0.2d, v4.2d, v6.2d \n"
"trn2 v2.2d, v4.2d, v6.2d \n"
"trn1 v1.2d, v5.2d, v7.2d \n"
"trn2 v3.2d, v5.2d, v7.2d \n"
"st1 {v0.4s}, [%0], #16 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%2], #16 \n"
"st1 {v3.4s}, [%3], #16 \n"
: "=r"(outptr0),
"=r"(outptr1),
"=r"(outptr2),
"=r"(outptr3),
"=r"(nn),
"=r"(nn4),
"=r"(nn1),
"=r"(tmpptr),
"=r"(kptr0)
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(nn),
"5"(nn4),
"6"(nn1),
"7"(tmpptr),
"8"(kptr0)
: "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
#endif // __ARM_FEATURE_DOTPROD
}
#endif // __aarch64__
for (; i + 1 < size; i += 2)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#endif
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
#if __aarch64__
int32x4_t _sum00 = vdupq_n_s32(0);
int32x4_t _sum10 = vdupq_n_s32(0);
#if __ARM_FEATURE_DOTPROD
for (int j = 0; j < nn; j++)
{
int8x16_t _val01_l_h = vld1q_s8(tmpptr);
int8x16_t _w0123_l = vld1q_s8(kptr0);
_sum00 = vdotq_laneq_s32(_sum00, _w0123_l, _val01_l_h, 0);
_sum10 = vdotq_laneq_s32(_sum10, _w0123_l, _val01_l_h, 1);
int8x16_t _w0123_h = vld1q_s8(kptr0 + 16);
_sum00 = vdotq_laneq_s32(_sum00, _w0123_h, _val01_l_h, 2);
_sum10 = vdotq_laneq_s32(_sum10, _w0123_h, _val01_l_h, 3);
tmpptr += 16;
kptr0 += 32;
}
if (nn4 > 0)
{
int j = 0;
for (; j + 1 < nn4; j += 2)
{
int8x16_t _val0123 = vld1q_s8(tmpptr);
int8x16_t _w0 = vld1q_s8(kptr0);
_sum00 = vdotq_laneq_s32(_sum00, _w0, _val0123, 0);
_sum10 = vdotq_laneq_s32(_sum10, _w0, _val0123, 1);
int8x16_t _w1 = vld1q_s8(kptr0 + 16);
_sum00 = vdotq_laneq_s32(_sum00, _w1, _val0123, 2);
_sum10 = vdotq_laneq_s32(_sum10, _w1, _val0123, 3);
tmpptr += 16;
kptr0 += 32;
}
for (; j < nn4; j++)
{
int8x8_t _val01 = vld1_s8(tmpptr);
int8x16_t _w0 = vld1q_s8(kptr0);
_sum00 = vdotq_lane_s32(_sum00, _w0, _val01, 0);
_sum10 = vdotq_lane_s32(_sum10, _w0, _val01, 1);
tmpptr += 8;
kptr0 += 16;
}
}
#else // __ARM_FEATURE_DOTPROD
if (nn > 0)
{
int32x4_t _sum01 = vdupq_n_s32(0);
int32x4_t _sum02 = vdupq_n_s32(0);
int32x4_t _sum03 = vdupq_n_s32(0);
int32x4_t _sum11 = vdupq_n_s32(0);
int32x4_t _sum12 = vdupq_n_s32(0);
int32x4_t _sum13 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val0 = vld1q_s8(tmpptr);
int8x16_t _val1 = vld1q_s8(tmpptr + 16);
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv00 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w01));
int16x8_t _wv01 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w01));
int16x8_t _wv02 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w23));
int16x8_t _wv03 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w23));
int16x8_t _wv10 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w01));
int16x8_t _wv11 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w01));
int16x8_t _wv12 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w23));
int16x8_t _wv13 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w23));
int8x16_t _w45 = vld1q_s8(kptr0 + 32);
int8x16_t _w67 = vld1q_s8(kptr0 + 48);
_wv00 = vmlal_s8(_wv00, vget_low_s8(_val1), vget_low_s8(_w45));
_wv01 = vmlal_s8(_wv01, vget_low_s8(_val1), vget_high_s8(_w45));
_wv02 = vmlal_s8(_wv02, vget_low_s8(_val1), vget_low_s8(_w67));
_wv03 = vmlal_s8(_wv03, vget_low_s8(_val1), vget_high_s8(_w67));
_wv10 = vmlal_s8(_wv10, vget_high_s8(_val1), vget_low_s8(_w45));
_wv11 = vmlal_s8(_wv11, vget_high_s8(_val1), vget_high_s8(_w45));
_wv12 = vmlal_s8(_wv12, vget_high_s8(_val1), vget_low_s8(_w67));
_wv13 = vmlal_s8(_wv13, vget_high_s8(_val1), vget_high_s8(_w67));
_sum00 = vpadalq_s16(_sum00, _wv00);
_sum01 = vpadalq_s16(_sum01, _wv01);
_sum02 = vpadalq_s16(_sum02, _wv02);
_sum03 = vpadalq_s16(_sum03, _wv03);
_sum10 = vpadalq_s16(_sum10, _wv10);
_sum11 = vpadalq_s16(_sum11, _wv11);
_sum12 = vpadalq_s16(_sum12, _wv12);
_sum13 = vpadalq_s16(_sum13, _wv13);
tmpptr += 32;
kptr0 += 64;
}
for (; j < nn; j++)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv00 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01));
int16x8_t _wv01 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01));
int16x8_t _wv02 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23));
int16x8_t _wv03 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23));
int16x8_t _wv10 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w01));
int16x8_t _wv11 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w01));
int16x8_t _wv12 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w23));
int16x8_t _wv13 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w23));
_sum00 = vpadalq_s16(_sum00, _wv00);
_sum01 = vpadalq_s16(_sum01, _wv01);
_sum02 = vpadalq_s16(_sum02, _wv02);
_sum03 = vpadalq_s16(_sum03, _wv03);
_sum10 = vpadalq_s16(_sum10, _wv10);
_sum11 = vpadalq_s16(_sum11, _wv11);
_sum12 = vpadalq_s16(_sum12, _wv12);
_sum13 = vpadalq_s16(_sum13, _wv13);
tmpptr += 16;
kptr0 += 32;
}
int32x4_t _s001 = vpaddq_s32(_sum00, _sum01);
int32x4_t _s023 = vpaddq_s32(_sum02, _sum03);
int32x4_t _s101 = vpaddq_s32(_sum10, _sum11);
int32x4_t _s123 = vpaddq_s32(_sum12, _sum13);
_sum00 = vpaddq_s32(_s001, _s023);
_sum10 = vpaddq_s32(_s101, _s123);
}
if (nn4 > 0)
{
int32x4_t _sum100 = vdupq_n_s32(0);
int32x4_t _sum101 = vdupq_n_s32(0);
int32x4_t _sum110 = vdupq_n_s32(0);
int32x4_t _sum111 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn4; j += 2)
{
int8x16_t _val0123 = vld1q_s8(tmpptr);
int32x4x2_t _val00221133 = vzipq_s32(vreinterpretq_s32_s8(_val0123), vreinterpretq_s32_s8(_val0123));
int8x8_t _val00 = vreinterpret_s8_s32(vget_low_s32(_val00221133.val[0]));
int8x8_t _val11 = vreinterpret_s8_s32(vget_high_s32(_val00221133.val[0]));
int8x8_t _val22 = vreinterpret_s8_s32(vget_low_s32(_val00221133.val[1]));
int8x8_t _val33 = vreinterpret_s8_s32(vget_high_s32(_val00221133.val[1]));
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv00 = vmull_s8(_val00, vget_low_s8(_w01));
int16x8_t _wv01 = vmull_s8(_val00, vget_high_s8(_w01));
int16x8_t _wv10 = vmull_s8(_val11, vget_low_s8(_w01));
int16x8_t _wv11 = vmull_s8(_val11, vget_high_s8(_w01));
_wv00 = vmlal_s8(_wv00, _val22, vget_low_s8(_w23));
_wv01 = vmlal_s8(_wv01, _val22, vget_high_s8(_w23));
_wv10 = vmlal_s8(_wv10, _val33, vget_low_s8(_w23));
_wv11 = vmlal_s8(_wv11, _val33, vget_high_s8(_w23));
_sum100 = vpadalq_s16(_sum100, _wv00);
_sum101 = vpadalq_s16(_sum101, _wv01);
_sum110 = vpadalq_s16(_sum110, _wv10);
_sum111 = vpadalq_s16(_sum111, _wv11);
tmpptr += 16;
kptr0 += 32;
}
for (; j < nn4; j++)
{
int8x8_t _val01 = vld1_s8(tmpptr);
int32x2x2_t _val0011 = vzip_s32(vreinterpret_s32_s8(_val01), vreinterpret_s32_s8(_val01));
int8x8_t _val00 = vreinterpret_s8_s32(_val0011.val[0]);
int8x8_t _val11 = vreinterpret_s8_s32(_val0011.val[1]);
int8x16_t _w01 = vld1q_s8(kptr0);
int16x8_t _wv00 = vmull_s8(_val00, vget_low_s8(_w01));
int16x8_t _wv01 = vmull_s8(_val00, vget_high_s8(_w01));
int16x8_t _wv10 = vmull_s8(_val11, vget_low_s8(_w01));
int16x8_t _wv11 = vmull_s8(_val11, vget_high_s8(_w01));
_sum100 = vpadalq_s16(_sum100, _wv00);
_sum101 = vpadalq_s16(_sum101, _wv01);
_sum110 = vpadalq_s16(_sum110, _wv10);
_sum111 = vpadalq_s16(_sum111, _wv11);
tmpptr += 8;
kptr0 += 16;
}
int32x4_t _s001 = vpaddq_s32(_sum100, _sum101);
int32x4_t _s101 = vpaddq_s32(_sum110, _sum111);
_sum00 = vaddq_s32(_sum00, _s001);
_sum10 = vaddq_s32(_sum10, _s101);
}
#endif // __ARM_FEATURE_DOTPROD
int j = 0;
for (; j + 3 < nn1; j += 4)
{
int16x8_t _val01234567 = vmovl_s8(vld1_s8(tmpptr));
int8x16_t _w = vld1q_s8(kptr0);
int16x8_t _w01234567 = vmovl_s8(vget_low_s8(_w));
int16x8_t _w89abcdef = vmovl_s8(vget_high_s8(_w));
int16x4_t _w0123 = vget_low_s16(_w01234567);
int16x4_t _w4567 = vget_high_s16(_w01234567);
int16x4_t _w89ab = vget_low_s16(_w89abcdef);
int16x4_t _wcdef = vget_high_s16(_w89abcdef);
_sum00 = vmlal_laneq_s16(_sum00, _w0123, _val01234567, 0);
_sum10 = vmlal_laneq_s16(_sum10, _w0123, _val01234567, 1);
_sum00 = vmlal_laneq_s16(_sum00, _w4567, _val01234567, 2);
_sum10 = vmlal_laneq_s16(_sum10, _w4567, _val01234567, 3);
_sum00 = vmlal_laneq_s16(_sum00, _w89ab, _val01234567, 4);
_sum10 = vmlal_laneq_s16(_sum10, _w89ab, _val01234567, 5);
_sum00 = vmlal_laneq_s16(_sum00, _wcdef, _val01234567, 6);
_sum10 = vmlal_laneq_s16(_sum10, _wcdef, _val01234567, 7);
tmpptr += 8;
kptr0 += 16;
}
for (; j < nn1; j++)
{
int16x4_t _val0 = vdup_n_s16(tmpptr[0]);
int16x4_t _val1 = vdup_n_s16(tmpptr[1]);
int16x4_t _w0123 = vdup_n_s16(0); // initialize before the lane inserts below
_w0123 = vset_lane_s16(kptr0[0], _w0123, 0);
_w0123 = vset_lane_s16(kptr0[1], _w0123, 1);
_w0123 = vset_lane_s16(kptr0[2], _w0123, 2);
_w0123 = vset_lane_s16(kptr0[3], _w0123, 3);
_sum00 = vmlal_s16(_sum00, _val0, _w0123);
_sum10 = vmlal_s16(_sum10, _val1, _w0123);
tmpptr += 2;
kptr0 += 4;
}
vst1q_lane_s32(outptr0, _sum00, 0);
vst1q_lane_s32(outptr1, _sum00, 1);
vst1q_lane_s32(outptr2, _sum00, 2);
vst1q_lane_s32(outptr3, _sum00, 3);
vst1q_lane_s32(outptr0 + 1, _sum10, 0);
vst1q_lane_s32(outptr1 + 1, _sum10, 1);
vst1q_lane_s32(outptr2 + 1, _sum10, 2);
vst1q_lane_s32(outptr3 + 1, _sum10, 3);
outptr0 += 2;
outptr1 += 2;
outptr2 += 2;
outptr3 += 2;
#else // __aarch64__
asm volatile(
"veor q0, q0 \n"
"veor q1, q1 \n"
"veor q2, q2 \n"
"veor q3, q3 \n"
"veor q4, q4 \n"
"veor q5, q5 \n"
"veor q6, q6 \n"
"veor q7, q7 \n"
"cmp %4, #0 \n"
"beq 3f \n"
"pld [%7, #256] \n"
"lsr r4, %4, #1 \n" // r4 = nn = size >> 1
"cmp r4, #0 \n"
"beq 1f \n"
"add r5, %8, #16 \n"
"pld [%8, #128] \n"
"mov r6, #32 \n"
"pld [%8, #384] \n"
"vld1.s8 {d20-d21}, [%8 :128], r6 \n" // _w01
"vld1.s8 {d16-d19}, [%7 :128]! \n" // _val0 _val1
"vld1.s8 {d22-d23}, [%8 :128], r6 \n" // _w45
"0: \n"
"vmull.s8 q12, d16, d20 \n"
"pld [%7, #256] \n"
"vmull.s8 q13, d16, d21 \n"
"pld [%8, #384] \n"
"vmull.s8 q14, d17, d20 \n"
"vmull.s8 q15, d17, d21 \n"
"vld1.s8 {d20-d21}, [r5 :128], r6 \n" // _w23
"vmlal.s8 q12, d18, d22 \n"
"vmlal.s8 q13, d18, d23 \n"
"subs r4, r4, #1 \n"
"vmlal.s8 q14, d19, d22 \n"
"vmlal.s8 q15, d19, d23 \n"
"vld1.s8 {d22-d23}, [r5 :128], r6 \n" // _w67
"vpadal.s16 q0, q12 \n"
"vmull.s8 q12, d16, d20 \n"
"vpadal.s16 q1, q13 \n"
"vmull.s8 q13, d16, d21 \n"
"vpadal.s16 q4, q14 \n"
"vmull.s8 q14, d17, d20 \n"
"vpadal.s16 q5, q15 \n"
"vmull.s8 q15, d17, d21 \n"
"vld1.s8 {d16-d17}, [%7 :128]! \n" // _val0
"vmlal.s8 q12, d18, d22 \n"
"vld1.s8 {d20-d21}, [%8 :128], r6 \n" // _w01
"vmlal.s8 q13, d18, d23 \n"
"pld [r5, #128] \n"
"vmlal.s8 q14, d19, d22 \n"
"pld [r5, #384] \n"
"vmlal.s8 q15, d19, d23 \n"
"vld1.s8 {d18-d19}, [%7 :128]! \n" // _val1
"vpadal.s16 q2, q12 \n"
"vld1.s8 {d22-d23}, [%8 :128], r6 \n" // _w45
"vpadal.s16 q3, q13 \n"
"pld [%7, #128] \n"
"vpadal.s16 q6, q14 \n"
"pld [%8, #128] \n"
"vpadal.s16 q7, q15 \n"
"bne 0b \n"
"sub %7, %7, #32 \n"
"sub %8, %8, #64 \n"
"1: \n"
"and r4, %4, #1 \n" // r4 = remain = size & 1
"cmp r4, #0 \n" // r4 > 0
"beq 2f \n"
"vld1.s8 {d16-d17}, [%7 :128]! \n" // _val
"vld1.s8 {d20-d21}, [%8 :128]! \n" // _w01
"vmull.s8 q12, d16, d20 \n"
"vld1.s8 {d22-d23}, [%8 :128]! \n" // _w23
"vmull.s8 q13, d16, d21 \n"
"vmull.s8 q14, d17, d20 \n"
"vmull.s8 q15, d17, d21 \n"
"vpadal.s16 q0, q12 \n"
"vmull.s8 q12, d16, d22 \n"
"vpadal.s16 q1, q13 \n"
"vmull.s8 q13, d16, d23 \n"
"vpadal.s16 q4, q14 \n"
"vmull.s8 q14, d17, d22 \n"
"vpadal.s16 q5, q15 \n"
"vmull.s8 q15, d17, d23 \n"
"vpadal.s16 q2, q12 \n"
"vpadal.s16 q3, q13 \n"
"vpadal.s16 q6, q14 \n"
"vpadal.s16 q7, q15 \n"
"2: \n"
"vpadd.s32 d16, d0, d1 \n"
"vpadd.s32 d17, d2, d3 \n"
"vpadd.s32 d18, d4, d5 \n"
"vpadd.s32 d19, d6, d7 \n"
"vpadd.s32 d20, d8, d9 \n"
"vpadd.s32 d21, d10, d11 \n"
"vpadd.s32 d22, d12, d13 \n"
"vpadd.s32 d23, d14, d15 \n"
"vpadd.s32 d0, d16, d17 \n"
"vpadd.s32 d1, d18, d19 \n"
"vpadd.s32 d2, d20, d21 \n"
"vpadd.s32 d3, d22, d23 \n"
"3: \n"
"cmp %5, #0 \n"
"beq 7f \n"
"veor q2, q2 \n"
"veor q3, q3 \n"
"veor q4, q4 \n"
"veor q5, q5 \n"
"lsr r4, %5, #1 \n" // r4 = nn4 >> 1
"cmp r4, #0 \n"
"beq 5f \n"
"4: \n"
"vld1.s8 {d16-d17}, [%7]! \n" // _val0123
"vld1.s8 {d20-d23}, [%8]! \n" // _w01 _w23
"vmov.s8 q9, q8 \n"
"vtrn.s32 q8, q9 \n" // _val00 _val22 _val11 _val33
"vmull.s8 q12, d16, d20 \n"
"vmull.s8 q13, d16, d21 \n"
"vmull.s8 q14, d18, d20 \n"
"vmull.s8 q15, d18, d21 \n"
"vmlal.s8 q12, d17, d22 \n"
"vmlal.s8 q13, d17, d23 \n"
"vmlal.s8 q14, d19, d22 \n"
"vmlal.s8 q15, d19, d23 \n"
"vpadal.s16 q2, q12 \n"
"vpadal.s16 q3, q13 \n"
"vpadal.s16 q4, q14 \n"
"vpadal.s16 q5, q15 \n"
"subs r4, r4, #1 \n"
"bne 4b \n"
"5: \n"
"and r4, %5, #1 \n" // r4 = nn4 & 1
"cmp r4, #0 \n" // r4 > 0
"beq 6f \n"
"vld1.s8 {d16}, [%7]! \n" // _val01
"vld1.s8 {d18-d19}, [%8]! \n" // _w01
"vmov.s8 d17, d16 \n"
"vtrn.s32 d16, d17 \n" // _val00 _val11
"vmull.s8 q12, d16, d18 \n"
"vmull.s8 q13, d16, d19 \n"
"vmull.s8 q14, d17, d18 \n"
"vmull.s8 q15, d17, d19 \n"
"vpadal.s16 q2, q12 \n"
"vpadal.s16 q3, q13 \n"
"vpadal.s16 q4, q14 \n"
"vpadal.s16 q5, q15 \n"
"6: \n"
"vpadd.s32 d16, d4, d5 \n"
"vpadd.s32 d17, d6, d7 \n"
"vpadd.s32 d18, d8, d9 \n"
"vpadd.s32 d19, d10, d11 \n"
"vadd.s32 q0, q0, q8 \n"
"vadd.s32 q1, q1, q9 \n"
"7: \n"
"lsr r4, %6, #2 \n" // r4 = nn1 >> 2
"cmp r4, #0 \n"
"beq 9f \n"
"8: \n"
"vld1.s8 {d4}, [%7]! \n"
"vmovl.s8 q2, d4 \n"
"vld1.s8 {d10-d11}, [%8]! \n"
"vmovl.s8 q3, d10 \n"
"vmovl.s8 q4, d11 \n"
"vmlal.s16 q0, d6, d4[0] \n"
"vmlal.s16 q1, d6, d4[1] \n"
"vmlal.s16 q0, d7, d4[2] \n"
"vmlal.s16 q1, d7, d4[3] \n"
"vmlal.s16 q0, d8, d5[0] \n"
"vmlal.s16 q1, d8, d5[1] \n"
"vmlal.s16 q0, d9, d5[2] \n"
"vmlal.s16 q1, d9, d5[3] \n"
"subs r4, r4, #1 \n"
"bne 8b \n"
"9: \n"
"and r4, %6, #3 \n" // r4 = nn1 & 3
"cmp r4, #0 \n" // w4 > 0
"beq 11f \n"
"10: \n"
"vld1.s8 {d4[]}, [%7]! \n"
"vld1.s8 {d6[]}, [%7]! \n"
"vmovl.s8 q2, d4 \n"
"vmovl.s8 q3, d6 \n"
"vld1.s8 {d8}, [%8] \n"
"vmovl.s8 q4, d8 \n"
"vmlal.s16 q0, d4, d8 \n"
"vmlal.s16 q1, d6, d8 \n"
"add %8, %8, #4 \n"
"subs r4, r4, #1 \n"
"bne 10b \n"
"11: \n"
"vst1.s32 {d0[0]}, [%0]! \n"
"vst1.s32 {d0[1]}, [%1]! \n"
"vst1.s32 {d1[0]}, [%2]! \n"
"vst1.s32 {d1[1]}, [%3]! \n"
"vst1.s32 {d2[0]}, [%0]! \n"
"vst1.s32 {d2[1]}, [%1]! \n"
"vst1.s32 {d3[0]}, [%2]! \n"
"vst1.s32 {d3[1]}, [%3]! \n"
: "=r"(outptr0),
"=r"(outptr1),
"=r"(outptr2),
"=r"(outptr3),
"=r"(nn),
"=r"(nn4),
"=r"(nn1),
"=r"(tmpptr),
"=r"(kptr0)
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(nn),
"5"(nn4),
"6"(nn1),
"7"(tmpptr),
"8"(kptr0)
: "memory", "r4", "r5", "r6", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i < size; i++)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#endif
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
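// split the channel reduction: nn covers groups of 8 input channels,
// nn4 a remaining group of 4, nn1 the up-to-3 leftover channels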
int32x4_t _sum0 = vdupq_n_s32(0);
#if __ARM_FEATURE_DOTPROD
for (int j = 0; j < nn; j++)
{
int8x8_t _val0_l_h = vld1_s8(tmpptr);
int8x16_t _w0123_l = vld1q_s8(kptr0);
_sum0 = vdotq_lane_s32(_sum0, _w0123_l, _val0_l_h, 0);
int8x16_t _w0123_h = vld1q_s8(kptr0 + 16);
_sum0 = vdotq_lane_s32(_sum0, _w0123_h, _val0_l_h, 1);
tmpptr += 8;
kptr0 += 32;
}
if (nn4 > 0)
{
int j = 0;
for (; j + 1 < nn4; j += 2)
{
int8x8_t _val01 = vld1_s8(tmpptr);
int8x16_t _w0 = vld1q_s8(kptr0);
_sum0 = vdotq_lane_s32(_sum0, _w0, _val01, 0);
int8x16_t _w1 = vld1q_s8(kptr0 + 16);
_sum0 = vdotq_lane_s32(_sum0, _w1, _val01, 1);
tmpptr += 8;
kptr0 += 32;
}
for (; j < nn4; j++)
{
int8x8_t _val_xxx = vld1_s8(tmpptr);
int8x16_t _w0 = vld1q_s8(kptr0);
_sum0 = vdotq_lane_s32(_sum0, _w0, _val_xxx, 0);
tmpptr += 4;
kptr0 += 16;
}
}
#else // __ARM_FEATURE_DOTPROD
if (nn > 0)
{
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv0 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01));
int16x8_t _wv1 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01));
int16x8_t _wv2 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23));
int16x8_t _wv3 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23));
int8x16_t _w45 = vld1q_s8(kptr0 + 32);
int8x16_t _w67 = vld1q_s8(kptr0 + 48);
_wv0 = vmlal_s8(_wv0, vget_high_s8(_val), vget_low_s8(_w45));
_wv1 = vmlal_s8(_wv1, vget_high_s8(_val), vget_high_s8(_w45));
_wv2 = vmlal_s8(_wv2, vget_high_s8(_val), vget_low_s8(_w67));
_wv3 = vmlal_s8(_wv3, vget_high_s8(_val), vget_high_s8(_w67));
_sum0 = vpadalq_s16(_sum0, _wv0);
_sum1 = vpadalq_s16(_sum1, _wv1);
_sum2 = vpadalq_s16(_sum2, _wv2);
_sum3 = vpadalq_s16(_sum3, _wv3);
tmpptr += 16;
kptr0 += 64;
}
for (; j < nn; j++)
{
int8x8_t _val = vld1_s8(tmpptr);
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv0 = vmull_s8(_val, vget_low_s8(_w01));
int16x8_t _wv1 = vmull_s8(_val, vget_high_s8(_w01));
int16x8_t _wv2 = vmull_s8(_val, vget_low_s8(_w23));
int16x8_t _wv3 = vmull_s8(_val, vget_high_s8(_w23));
_sum0 = vpadalq_s16(_sum0, _wv0);
_sum1 = vpadalq_s16(_sum1, _wv1);
_sum2 = vpadalq_s16(_sum2, _wv2);
_sum3 = vpadalq_s16(_sum3, _wv3);
tmpptr += 8;
kptr0 += 32;
}
#if __aarch64__
int32x4_t _s01 = vpaddq_s32(_sum0, _sum1);
int32x4_t _s23 = vpaddq_s32(_sum2, _sum3);
_sum0 = vpaddq_s32(_s01, _s23);
#else
int32x2_t _s01_low = vpadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0));
int32x2_t _s01_high = vpadd_s32(vget_low_s32(_sum1), vget_high_s32(_sum1));
int32x2_t _s23_low = vpadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2));
int32x2_t _s23_high = vpadd_s32(vget_low_s32(_sum3), vget_high_s32(_sum3));
_sum0 = vcombine_s32(vpadd_s32(_s01_low, _s01_high), vpadd_s32(_s23_low, _s23_high));
#endif
}
if (nn4 > 0)
{
int32x4_t _sum10 = vdupq_n_s32(0);
int32x4_t _sum11 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn4; j += 2)
{
int8x8_t _val01 = vld1_s8(tmpptr);
int32x2x2_t _val0011 = vzip_s32(vreinterpret_s32_s8(_val01), vreinterpret_s32_s8(_val01));
int8x8_t _val00 = vreinterpret_s8_s32(_val0011.val[0]);
int8x8_t _val11 = vreinterpret_s8_s32(_val0011.val[1]);
int8x16_t _w0 = vld1q_s8(kptr0);
int8x16_t _w1 = vld1q_s8(kptr0 + 16);
int16x8_t _wv0 = vmull_s8(_val00, vget_low_s8(_w0));
int16x8_t _wv1 = vmull_s8(_val00, vget_high_s8(_w0));
_wv0 = vmlal_s8(_wv0, _val11, vget_low_s8(_w1));
_wv1 = vmlal_s8(_wv1, _val11, vget_high_s8(_w1));
_sum10 = vpadalq_s16(_sum10, _wv0);
_sum11 = vpadalq_s16(_sum11, _wv1);
tmpptr += 8;
kptr0 += 32;
}
for (; j < nn4; j++)
{
int8x8_t _val_xxx = vld1_s8(tmpptr);
int8x8_t _val_val = vreinterpret_s8_s32(vzip_s32(vreinterpret_s32_s8(_val_xxx), vreinterpret_s32_s8(_val_xxx)).val[0]);
int8x16_t _w0 = vld1q_s8(kptr0);
int16x8_t _wv0 = vmull_s8(_val_val, vget_low_s8(_w0));
int16x8_t _wv1 = vmull_s8(_val_val, vget_high_s8(_w0));
_sum10 = vpadalq_s16(_sum10, _wv0);
_sum11 = vpadalq_s16(_sum11, _wv1);
tmpptr += 4;
kptr0 += 16;
}
#if __aarch64__
int32x4_t _s01 = vpaddq_s32(_sum10, _sum11);
#else
int32x2_t _s01_low = vpadd_s32(vget_low_s32(_sum10), vget_high_s32(_sum10));
int32x2_t _s01_high = vpadd_s32(vget_low_s32(_sum11), vget_high_s32(_sum11));
int32x4_t _s01 = vcombine_s32(_s01_low, _s01_high);
#endif
_sum0 = vaddq_s32(_sum0, _s01);
}
#endif // __ARM_FEATURE_DOTPROD
int32x4_t _sum1 = vdupq_n_s32(0);
int j = 0;
for (; j + 3 < nn1; j += 4)
{
int16x4_t _val0123 = vget_low_s16(vmovl_s8(vld1_s8(tmpptr)));
int8x16_t _w = vld1q_s8(kptr0);
int16x8_t _w01234567 = vmovl_s8(vget_low_s8(_w));
int16x8_t _w89abcdef = vmovl_s8(vget_high_s8(_w));
int16x4_t _w0123 = vget_low_s16(_w01234567);
int16x4_t _w4567 = vget_high_s16(_w01234567);
int16x4_t _w89ab = vget_low_s16(_w89abcdef);
int16x4_t _wcdef = vget_high_s16(_w89abcdef);
_sum0 = vmlal_lane_s16(_sum0, _w0123, _val0123, 0);
_sum1 = vmlal_lane_s16(_sum1, _w4567, _val0123, 1);
_sum0 = vmlal_lane_s16(_sum0, _w89ab, _val0123, 2);
_sum1 = vmlal_lane_s16(_sum1, _wcdef, _val0123, 3);
tmpptr += 4;
kptr0 += 16;
}
for (; j < nn1; j++)
{
int16x4_t _val = vdup_n_s16(tmpptr[0]);
int16x4_t _w0123 = vdup_n_s16(0); // start from a defined vector before the lane inserts
_w0123 = vset_lane_s16(kptr0[0], _w0123, 0);
_w0123 = vset_lane_s16(kptr0[1], _w0123, 1);
_w0123 = vset_lane_s16(kptr0[2], _w0123, 2);
_w0123 = vset_lane_s16(kptr0[3], _w0123, 3);
_sum0 = vmlal_s16(_sum0, _val, _w0123);
tmpptr += 1;
kptr0 += 4;
}
_sum0 = vaddq_s32(_sum0, _sum1);
vst1q_lane_s32(outptr0, _sum0, 0);
vst1q_lane_s32(outptr1, _sum0, 1);
vst1q_lane_s32(outptr2, _sum0, 2);
vst1q_lane_s32(outptr3, _sum0, 3);
outptr0 += 1;
outptr1 += 1;
outptr2 += 1;
outptr3 += 1;
}
}
remain_outch_start += nn_outch << 2;
#endif // __ARM_NEON
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
int* outptr0 = top_blob.channel(p);
int i = 0;
#if __ARM_NEON
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
for (; i + 15 < size; i += 16)
{
const signed char* tmpptr = tmp.channel(i / 16);
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int8x16_t _val0123_l = vld1q_s8(tmpptr);
int8x16_t _val4567_l = vld1q_s8(tmpptr + 16);
int8x16_t _val89ab_l = vld1q_s8(tmpptr + 32);
int8x16_t _valcdef_l = vld1q_s8(tmpptr + 48);
int8x16_t _val0123_h = vld1q_s8(tmpptr + 64);
int8x16_t _val4567_h = vld1q_s8(tmpptr + 80);
int8x16_t _val89ab_h = vld1q_s8(tmpptr + 96);
int8x16_t _valcdef_h = vld1q_s8(tmpptr + 112);
int8x8_t _w_lh = vld1_s8(kptr0);
_sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0);
_sum1 = vdotq_lane_s32(_sum1, _val4567_l, _w_lh, 0);
_sum2 = vdotq_lane_s32(_sum2, _val89ab_l, _w_lh, 0);
_sum3 = vdotq_lane_s32(_sum3, _valcdef_l, _w_lh, 0);
_sum0 = vdotq_lane_s32(_sum0, _val0123_h, _w_lh, 1);
_sum1 = vdotq_lane_s32(_sum1, _val4567_h, _w_lh, 1);
_sum2 = vdotq_lane_s32(_sum2, _val89ab_h, _w_lh, 1);
_sum3 = vdotq_lane_s32(_sum3, _valcdef_h, _w_lh, 1);
tmpptr += 128;
kptr0 += 8;
}
if (nn4 > 0)
{
int32x4_t _sum4 = vdupq_n_s32(0);
int32x4_t _sum5 = vdupq_n_s32(0);
int32x4_t _sum6 = vdupq_n_s32(0);
int32x4_t _sum7 = vdupq_n_s32(0);
for (int j = 0; j < nn4; j++)
{
int8x16_t _val0 = vld1q_s8(tmpptr);
int8x16_t _val1 = vld1q_s8(tmpptr + 16);
int8x16_t _val2 = vld1q_s8(tmpptr + 32);
int8x16_t _val3 = vld1q_s8(tmpptr + 48);
int8x8_t _w_0123_xxxx = vld1_s8(kptr0);
_sum4 = vdotq_lane_s32(_sum4, _val0, _w_0123_xxxx, 0);
_sum5 = vdotq_lane_s32(_sum5, _val1, _w_0123_xxxx, 0);
_sum6 = vdotq_lane_s32(_sum6, _val2, _w_0123_xxxx, 0);
_sum7 = vdotq_lane_s32(_sum7, _val3, _w_0123_xxxx, 0);
tmpptr += 64;
kptr0 += 4;
}
_sum0 = vaddq_s32(_sum0, _sum4);
_sum1 = vaddq_s32(_sum1, _sum5);
_sum2 = vaddq_s32(_sum2, _sum6);
_sum3 = vaddq_s32(_sum3, _sum7);
}
int j = 0;
for (; j < nn1; j++)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x8_t _w = vld1_dup_s8(kptr0);
int16x8_t _s0 = vmull_s8(vget_low_s8(_val), _w);
int16x8_t _s1 = vmull_s8(vget_high_s8(_val), _w);
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s0));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s0));
_sum2 = vaddw_s16(_sum2, vget_low_s16(_s1));
_sum3 = vaddw_s16(_sum3, vget_high_s16(_s1));
tmpptr += 16;
kptr0 += 1;
}
vst1q_s32(outptr0, _sum0);
vst1q_s32(outptr0 + 4, _sum1);
vst1q_s32(outptr0 + 8, _sum2);
vst1q_s32(outptr0 + 12, _sum3);
outptr0 += 16;
}
for (; i + 7 < size; i += 8)
{
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8);
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
if (nn > 0)
{
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int8x16_t _val0123_l = vld1q_s8(tmpptr);
int8x16_t _val4567_l = vld1q_s8(tmpptr + 16);
int8x16_t _val0123_h = vld1q_s8(tmpptr + 32);
int8x16_t _val4567_h = vld1q_s8(tmpptr + 48);
int8x8_t _w_lh = vld1_s8(kptr0);
_sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0);
_sum1 = vdotq_lane_s32(_sum1, _val4567_l, _w_lh, 0);
_sum2 = vdotq_lane_s32(_sum2, _val0123_h, _w_lh, 1);
_sum3 = vdotq_lane_s32(_sum3, _val4567_h, _w_lh, 1);
tmpptr += 64;
kptr0 += 8;
}
_sum0 = vaddq_s32(_sum0, _sum2);
_sum1 = vaddq_s32(_sum1, _sum3);
}
if (nn4 > 0)
{
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
for (int j = 0; j < nn4; j++)
{
int8x16_t _val0 = vld1q_s8(tmpptr);
int8x16_t _val1 = vld1q_s8(tmpptr + 16);
int8x8_t _w_0123_xxxx = vld1_s8(kptr0);
_sum2 = vdotq_lane_s32(_sum2, _val0, _w_0123_xxxx, 0);
_sum3 = vdotq_lane_s32(_sum3, _val1, _w_0123_xxxx, 0);
tmpptr += 32;
kptr0 += 4;
}
_sum0 = vaddq_s32(_sum0, _sum2);
_sum1 = vaddq_s32(_sum1, _sum3);
}
int j = 0;
for (; j < nn1; j++)
{
int8x8_t _val = vld1_s8(tmpptr);
int8x8_t _w = vld1_dup_s8(kptr0);
int16x8_t _s = vmull_s8(_val, _w);
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s));
tmpptr += 8;
kptr0 += 1;
}
vst1q_s32(outptr0, _sum0);
vst1q_s32(outptr0 + 4, _sum1);
outptr0 += 8;
}
#endif // __ARM_FEATURE_DOTPROD
for (; i + 3 < size; i += 4)
{
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4);
#else
const signed char* tmpptr = tmp.channel(i / 4);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int32x4_t _sum0 = vdupq_n_s32(0);
if (nn > 0)
{
#if __ARM_FEATURE_DOTPROD
int32x4_t _sum1 = vdupq_n_s32(0);
int j = 0;
for (; j < nn; j++)
{
int8x16_t _val0123_l = vld1q_s8(tmpptr);
int8x16_t _val0123_h = vld1q_s8(tmpptr + 16);
int8x8_t _w_lh = vld1_s8(kptr0);
_sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0);
_sum1 = vdotq_lane_s32(_sum1, _val0123_h, _w_lh, 1);
tmpptr += 32;
kptr0 += 8;
}
_sum0 = vaddq_s32(_sum0, _sum1);
#else // __ARM_FEATURE_DOTPROD
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int32x4_t _sum4 = vdupq_n_s32(0);
int32x4_t _sum5 = vdupq_n_s32(0);
int32x4_t _sum6 = vdupq_n_s32(0);
int32x4_t _sum7 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val0 = vld1q_s8(tmpptr);
int8x16_t _val1 = vld1q_s8(tmpptr + 16);
int8x16_t _val2 = vld1q_s8(tmpptr + 32);
int8x16_t _val3 = vld1q_s8(tmpptr + 48);
int8x16_t _w = vld1q_s8(kptr0);
int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w));
int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w));
int16x8_t _s2 = vmull_s8(vget_low_s8(_val1), vget_low_s8(_w));
int16x8_t _s3 = vmull_s8(vget_high_s8(_val1), vget_low_s8(_w));
_s0 = vmlal_s8(_s0, vget_low_s8(_val2), vget_high_s8(_w));
_s1 = vmlal_s8(_s1, vget_high_s8(_val2), vget_high_s8(_w));
_s2 = vmlal_s8(_s2, vget_low_s8(_val3), vget_high_s8(_w));
_s3 = vmlal_s8(_s3, vget_high_s8(_val3), vget_high_s8(_w));
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s0));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s0));
_sum2 = vaddw_s16(_sum2, vget_low_s16(_s1));
_sum3 = vaddw_s16(_sum3, vget_high_s16(_s1));
_sum4 = vaddw_s16(_sum4, vget_low_s16(_s2));
_sum5 = vaddw_s16(_sum5, vget_high_s16(_s2));
_sum6 = vaddw_s16(_sum6, vget_low_s16(_s3));
_sum7 = vaddw_s16(_sum7, vget_high_s16(_s3));
tmpptr += 64;
kptr0 += 16;
}
for (; j < nn; j++)
{
int8x16_t _val0 = vld1q_s8(tmpptr);
int8x16_t _val1 = vld1q_s8(tmpptr + 16);
int8x8_t _w = vld1_s8(kptr0);
int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), _w);
int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), _w);
int16x8_t _s2 = vmull_s8(vget_low_s8(_val1), _w);
int16x8_t _s3 = vmull_s8(vget_high_s8(_val1), _w);
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s0));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s0));
_sum2 = vaddw_s16(_sum2, vget_low_s16(_s1));
_sum3 = vaddw_s16(_sum3, vget_high_s16(_s1));
_sum4 = vaddw_s16(_sum4, vget_low_s16(_s2));
_sum5 = vaddw_s16(_sum5, vget_high_s16(_s2));
_sum6 = vaddw_s16(_sum6, vget_low_s16(_s3));
_sum7 = vaddw_s16(_sum7, vget_high_s16(_s3));
tmpptr += 32;
kptr0 += 8;
}
_sum0 = vaddq_s32(_sum0, _sum1);
_sum2 = vaddq_s32(_sum2, _sum3);
_sum4 = vaddq_s32(_sum4, _sum5);
_sum6 = vaddq_s32(_sum6, _sum7);
int32x2_t _s0 = vadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0));
int32x2_t _s2 = vadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2));
int32x2_t _s4 = vadd_s32(vget_low_s32(_sum4), vget_high_s32(_sum4));
int32x2_t _s6 = vadd_s32(vget_low_s32(_sum6), vget_high_s32(_sum6));
int32x2_t _ss0 = vpadd_s32(_s0, _s2);
int32x2_t _ss1 = vpadd_s32(_s4, _s6);
_sum0 = vcombine_s32(_ss0, _ss1);
#endif // __ARM_FEATURE_DOTPROD
}
int sum0123[4] = {0, 0, 0, 0};
if (nn4 > 0)
{
#if __ARM_FEATURE_DOTPROD
int32x4_t _sum1 = vdupq_n_s32(0);
int j = 0;
for (; j < nn4; j++)
{
int8x16_t _val0123_lh = vld1q_s8(tmpptr);
int8x8_t _w_lh_xx = vld1_s8(kptr0);
_sum1 = vdotq_lane_s32(_sum1, _val0123_lh, _w_lh_xx, 0);
tmpptr += 16;
kptr0 += 4;
}
_sum0 = vaddq_s32(_sum0, _sum1);
#else // __ARM_FEATURE_DOTPROD
int j = 0;
for (; j < nn4; j++)
{
signed char val0 = tmpptr[0];
signed char val1 = tmpptr[1];
signed char val2 = tmpptr[2];
signed char val3 = tmpptr[3];
signed char val4 = tmpptr[4];
signed char val5 = tmpptr[5];
signed char val6 = tmpptr[6];
signed char val7 = tmpptr[7];
signed char val8 = tmpptr[8];
signed char val9 = tmpptr[9];
signed char val10 = tmpptr[10];
signed char val11 = tmpptr[11];
signed char val12 = tmpptr[12];
signed char val13 = tmpptr[13];
signed char val14 = tmpptr[14];
signed char val15 = tmpptr[15];
signed char w0 = kptr0[0];
signed char w1 = kptr0[1];
signed char w2 = kptr0[2];
signed char w3 = kptr0[3];
sum0123[0] += val0 * w0;
sum0123[0] += val1 * w1;
sum0123[0] += val2 * w2;
sum0123[0] += val3 * w3;
sum0123[1] += val4 * w0;
sum0123[1] += val5 * w1;
sum0123[1] += val6 * w2;
sum0123[1] += val7 * w3;
sum0123[2] += val8 * w0;
sum0123[2] += val9 * w1;
sum0123[2] += val10 * w2;
sum0123[2] += val11 * w3;
sum0123[3] += val12 * w0;
sum0123[3] += val13 * w1;
sum0123[3] += val14 * w2;
sum0123[3] += val15 * w3;
tmpptr += 16;
kptr0 += 4;
}
#endif // __ARM_FEATURE_DOTPROD
}
int j = 0;
for (; j < nn1; j++)
{
signed char val0 = tmpptr[0];
signed char val1 = tmpptr[1];
signed char val2 = tmpptr[2];
signed char val3 = tmpptr[3];
signed char w = kptr0[0];
sum0123[0] += val0 * w;
sum0123[1] += val1 * w;
sum0123[2] += val2 * w;
sum0123[3] += val3 * w;
tmpptr += 4;
kptr0 += 1;
}
_sum0 = vaddq_s32(_sum0, vld1q_s32(sum0123));
vst1q_s32(outptr0, _sum0);
outptr0 += 4;
}
#endif // __aarch64__
for (; i + 1 < size; i += 2)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#endif
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int32x2_t _sum = vdup_n_s32(0);
if (nn > 0)
{
#if __ARM_FEATURE_DOTPROD
int32x2_t _sum0 = vdup_n_s32(0);
int32x2_t _sum1 = vdup_n_s32(0);
int j = 0;
for (; j < nn; j++)
{
int8x16_t _val01_lh = vld1q_s8(tmpptr);
int8x8_t _w_lh = vld1_s8(kptr0);
_sum0 = vdot_lane_s32(_sum0, vget_low_s8(_val01_lh), _w_lh, 0);
_sum1 = vdot_lane_s32(_sum1, vget_high_s8(_val01_lh), _w_lh, 1);
tmpptr += 16;
kptr0 += 8;
}
_sum = vadd_s32(_sum0, _sum1);
#else // __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val0 = vld1q_s8(tmpptr);
int8x16_t _val1 = vld1q_s8(tmpptr + 16);
int8x16_t _w = vld1q_s8(kptr0);
int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w));
int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w));
_s0 = vmlal_s8(_s0, vget_low_s8(_val1), vget_high_s8(_w));
_s1 = vmlal_s8(_s1, vget_high_s8(_val1), vget_high_s8(_w));
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s0));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s0));
_sum2 = vaddw_s16(_sum2, vget_low_s16(_s1));
_sum3 = vaddw_s16(_sum3, vget_high_s16(_s1));
tmpptr += 32;
kptr0 += 16;
}
for (; j < nn; j++)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x8_t _w = vld1_s8(kptr0);
int16x8_t _s0 = vmull_s8(vget_low_s8(_val), _w);
int16x8_t _s1 = vmull_s8(vget_high_s8(_val), _w);
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s0));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s0));
_sum2 = vaddw_s16(_sum2, vget_low_s16(_s1));
_sum3 = vaddw_s16(_sum3, vget_high_s16(_s1));
tmpptr += 16;
kptr0 += 8;
}
_sum0 = vaddq_s32(_sum0, _sum1);
_sum2 = vaddq_s32(_sum2, _sum3);
int32x2_t _s0 = vadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0));
int32x2_t _s2 = vadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2));
_sum = vpadd_s32(_s0, _s2);
#endif // __ARM_FEATURE_DOTPROD
}
int sum01[2] = {0, 0};
if (nn4 > 0)
{
int j = 0;
for (; j < nn4; j++)
{
signed char val0 = tmpptr[0];
signed char val1 = tmpptr[1];
signed char val2 = tmpptr[2];
signed char val3 = tmpptr[3];
signed char val4 = tmpptr[4];
signed char val5 = tmpptr[5];
signed char val6 = tmpptr[6];
signed char val7 = tmpptr[7];
signed char w0 = kptr0[0];
signed char w1 = kptr0[1];
signed char w2 = kptr0[2];
signed char w3 = kptr0[3];
sum01[0] += val0 * w0;
sum01[0] += val1 * w1;
sum01[0] += val2 * w2;
sum01[0] += val3 * w3;
sum01[1] += val4 * w0;
sum01[1] += val5 * w1;
sum01[1] += val6 * w2;
sum01[1] += val7 * w3;
tmpptr += 8;
kptr0 += 4;
}
}
int j = 0;
for (; j < nn1; j++)
{
signed char val0 = tmpptr[0];
signed char val1 = tmpptr[1];
signed char w = kptr0[0];
sum01[0] += val0 * w;
sum01[1] += val1 * w;
tmpptr += 2;
kptr0 += 1;
}
_sum = vadd_s32(_sum, vld1_s32(sum01));
vst1_s32(outptr0, _sum);
outptr0 += 2;
}
for (; i < size; i++)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#endif
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int sum = 0;
if (nn > 0)
{
#if __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x2_t _sum1 = vdup_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x16_t _w = vld1q_s8(kptr0);
_sum0 = vdotq_s32(_sum0, _val, _w);
tmpptr += 16;
kptr0 += 16;
}
for (; j < nn; j++)
{
int8x8_t _val = vld1_s8(tmpptr);
int8x8_t _w = vld1_s8(kptr0);
_sum1 = vdot_s32(_sum1, _val, _w);
tmpptr += 8;
kptr0 += 8;
}
sum = vaddvq_s32(_sum0) + vaddv_s32(_sum1);
#else // __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x16_t _w = vld1q_s8(kptr0);
int16x8_t _s8 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w));
_s8 = vmlal_s8(_s8, vget_high_s8(_val), vget_high_s8(_w));
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s8));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s8));
tmpptr += 16;
kptr0 += 16;
}
for (; j < nn; j++)
{
int8x8_t _val = vld1_s8(tmpptr);
int8x8_t _w = vld1_s8(kptr0);
int16x8_t _s8 = vmull_s8(_val, _w);
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s8));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s8));
tmpptr += 8;
kptr0 += 8;
}
int32x4_t _sum = vaddq_s32(_sum0, _sum1);
#if __aarch64__
sum = vaddvq_s32(_sum); // dot
#else
int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum));
_ss = vpadd_s32(_ss, _ss);
sum = vget_lane_s32(_ss, 0);
#endif
#endif // __ARM_FEATURE_DOTPROD
}
if (nn4 > 0)
{
int j = 0;
for (; j < nn4; j++)
{
signed char val0 = tmpptr[0];
signed char val1 = tmpptr[1];
signed char val2 = tmpptr[2];
signed char val3 = tmpptr[3];
signed char w0 = kptr0[0];
signed char w1 = kptr0[1];
signed char w2 = kptr0[2];
signed char w3 = kptr0[3];
sum += val0 * w0;
sum += val1 * w1;
sum += val2 * w2;
sum += val3 * w3;
tmpptr += 4;
kptr0 += 4;
}
}
int j = 0;
for (; j < nn1; j++)
{
signed char val = tmpptr[0];
signed char w = kptr0[0];
sum += val * w;
tmpptr += 1;
kptr0 += 1;
}
outptr0[0] = sum;
outptr0 += 1;
}
#else // __ARM_NEON
for (; i < size; i++)
{
const signed char* tmpptr = tmp.channel(i);
const signed char* kptr0 = kernel.channel(p);
int nn1 = inch * maxk;
int sum = 0;
int j = 0;
for (; j < nn1; j++)
{
signed char val = tmpptr[0];
signed char w = kptr0[0];
sum += val * w;
tmpptr += 1;
kptr0 += 1;
}
outptr0[0] = sum;
outptr0 += 1;
}
#endif // __ARM_NEON
}
}
static void convolution_im2col_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
#if NCNN_ARM82DOT && __ARM_NEON && __aarch64__ && !__ARM_FEATURE_DOTPROD
if (ncnn::cpu_support_arm_asimddp())
{
extern void convolution_im2col_sgemm_transform_kernel_int8_neon_arm82dot(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h);
convolution_im2col_sgemm_transform_kernel_int8_neon_arm82dot(_kernel, kernel_tm, inch, outch, kernel_w, kernel_h);
return;
}
#endif
const int maxk = kernel_w * kernel_h;
#if __ARM_NEON
// interleave
// src = maxk-inch-outch
// dst = 8a-4b-maxk-inch/8a-outch/4b
// dst = 4a-4b-2-maxk-inch/8a-outch/4b (arm82)
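// e.g. for inch = 8, outch = 4 on the non-dotprod path, each maxk step packs
// the 4x8 weight block row by row: w(out0,in0..7), w(out1,in0..7), ..., so
// the sgemm kernels above consume 32 contiguous bytes per step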
Mat kernel = _kernel.reshape(maxk, inch, outch);
if (outch >= 4)
{
if (inch >= 8)
kernel_tm.create(32 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, outch / 4 + outch % 4, (size_t)1u);
else if (inch >= 4)
kernel_tm.create(16 * maxk, inch / 4 + inch % 4, outch / 4 + outch % 4, (size_t)1u);
else
kernel_tm.create(4 * maxk, inch, outch / 4 + outch % 4, (size_t)1u);
}
else
{
if (inch >= 8)
kernel_tm.create(8 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, outch, (size_t)1u);
else if (inch >= 4)
kernel_tm.create(4 * maxk, inch / 4 + inch % 4, outch, (size_t)1u);
else
kernel_tm.create(1 * maxk, inch, outch, (size_t)1u);
}
int q = 0;
for (; q + 3 < outch; q += 4)
{
signed char* g00 = kernel_tm.channel(q / 4);
int p = 0;
for (; p + 7 < inch; p += 8)
{
for (int k = 0; k < maxk; k++)
{
#if __ARM_FEATURE_DOTPROD
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
for (int i = 0; i < 4; i++)
{
for (int j = 4; j < 8; j++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
#else
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 8; j++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
#endif
}
}
for (; p + 3 < inch; p += 4)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
}
}
for (; p < inch; p++)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 4; i++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p);
g00[0] = k00[k];
g00++;
}
}
}
}
// TODO unroll 2
for (; q < outch; q++)
{
signed char* g00 = kernel_tm.channel(q / 4 + q % 4);
int p = 0;
for (; p + 7 < inch; p += 8)
{
for (int k = 0; k < maxk; k++)
{
for (int j = 0; j < 8; j++)
{
const signed char* k00 = kernel.channel(q).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
}
for (; p + 3 < inch; p += 4)
{
for (int k = 0; k < maxk; k++)
{
for (int j = 0; j < 4; j++)
{
const signed char* k00 = kernel.channel(q).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
}
for (; p < inch; p++)
{
for (int k = 0; k < maxk; k++)
{
const signed char* k00 = kernel.channel(q).row<const signed char>(p);
g00[0] = k00[k];
g00++;
}
}
}
#else // __ARM_NEON
kernel_tm = _kernel.reshape(maxk, inch, outch);
#endif // __ARM_NEON
}
static void convolution_im2col_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = kernel_w * kernel_h;
// im2col
Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator);
{
const int gap = w * stride_h - outw * stride_w;
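// gap: input pixels to skip at the end of each output row to reach the
// start of the next one (stride_h rows down, minus what the row consumed)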
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
signed char* ptr = bottom_im2col.channel(p);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
const signed char* sptr = img.row<const signed char>(dilation_h * u) + dilation_w * v;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
ptr[0] = sptr[0];
ptr[1] = sptr[stride_w];
ptr[2] = sptr[stride_w * 2];
ptr[3] = sptr[stride_w * 3];
sptr += stride_w * 4;
ptr += 4;
}
for (; j + 1 < outw; j += 2)
{
ptr[0] = sptr[0];
ptr[1] = sptr[stride_w];
sptr += stride_w * 2;
ptr += 2;
}
for (; j < outw; j++)
{
ptr[0] = sptr[0];
sptr += stride_w;
ptr += 1;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_int8_neon(bottom_im2col, top_blob, kernel, opt);
}
|
GB_binop__rminus_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__rminus_uint32
// A.*B function (eWiseMult): GB_AemultB__rminus_uint32
// A*D function (colscale): GB_AxD__rminus_uint32
// D*A function (rowscale): GB_DxB__rminus_uint32
// C+=B function (dense accum): GB_Cdense_accumB__rminus_uint32
// C+=b function (dense accum): GB_Cdense_accumb__rminus_uint32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rminus_uint32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rminus_uint32
// C=scalar+B GB_bind1st__rminus_uint32
// C=scalar+B' GB_bind1st_tran__rminus_uint32
// C=A+scalar GB_bind2nd__rminus_uint32
// C=A'+scalar GB_bind2nd_tran__rminus_uint32
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (y - x) ;
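// e.g. GB_BINOP (z, aij, bij, i, j) yields z = bij - aij: rminus is minus
// with its operands reversed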
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_UINT32 || GxB_NO_RMINUS_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__rminus_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__rminus_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__rminus_uint32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__rminus_uint32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__rminus_uint32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__rminus_uint32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__rminus_uint32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__rminus_uint32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__rminus_uint32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = Bx [p] ;
Cx [p] = (bij - x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__rminus_uint32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = Ax [p] ;
Cx [p] = (y - aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij - x) ; \
}
GrB_Info GB_bind1st_tran__rminus_uint32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (y - aij) ; \
}
GrB_Info GB_bind2nd_tran__rminus_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
task_wait.c | /* Based on A.13.4c, p182 of OMP 3.0 spec.
* Liao, 9/15/2008
*/
#include <stdio.h>
#include <assert.h>
unsigned long int input = 40;
unsigned long int fib(unsigned long int n)
{
unsigned long int i, j;
if (n<2)
return n;
else
{
#pragma omp task shared(i)
i=fib(n-1);
#pragma omp task shared(j)
j=fib(n-2);
#pragma omp taskwait
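// taskwait: suspend until the two child tasks above complete, so i and j
// hold fib(n-1) and fib(n-2) before they are summed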
return i+j;
}
}
int main()
{
unsigned long int result = 0;
#pragma omp parallel
{
#pragma omp single
{
result = fib(input);
}
}
printf("fib(%lu) = %lu\n", input, result);
return 0;
}
|
expected_output.c | #include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <polybench.h>
#include "trmm.h"
/**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/*trmm.c: this file is part of PolyBench/C*/
/*Include polybench common header.*/
/*Include benchmark-specific header.*/
/*Array initialization.*/
static void init_array(int m, int n, double *alpha, double A[1000][1000], double B[1000][1200]) {
int i, j;
*alpha = 1.5;
for(i = 0; i < m; i++) {
for(j = 0; j < i; j++) {
A[i][j] = (double) ((i + j) % m) / m;
}
A[i][i] = 1.0;
for(j = 0; j < n; j++) {
B[i][j] = (double) ((n + (i - j)) % n) / n;
}
}
}
/*DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output.*/
static void print_array(int m, int n, double B[1000][1200]) {
int i, j;
fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n");
fprintf(stderr, "begin dump: %s", "B");
for(i = 0; i < m; i++)
for(j = 0; j < n; j++) {
if((i * m + j) % 20 == 0) fprintf(stderr, "\n");
fprintf(stderr, "%0.2lf ", B[i][j]);
}
fprintf(stderr, "\nend dump: %s\n", "B");
fprintf(stderr, "==END DUMP_ARRAYS==\n");
}
/*Main computational kernel. The whole function will be timed,
including the call and return.*/
static void kernel_trmm(int m, int n, double alpha, double A[1000][1000], double B[1000][1200]) {
int i, j, k;
/*************** Clava msgError **************
unsolved dependency for arrayAccess B use : RW
****************************************/
for(i = 0; i < m; i++) {
#pragma omp parallel for default(shared) private(j, k) firstprivate(n, i, m, alpha, A)
for(j = 0; j < n; j++) {
/*************** Clava msgError **************
unsolved dependency for arrayAccess B use : RW
****************************************/
for(k = i + 1; k < m; k++)
B[i][j] += A[k][i] * B[k][j];
B[i][j] = alpha * B[i][j];
}
}
}
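/* Note on the parallelization above: the k-loop reads B[k][j] for k > i while
iteration i writes B[i][j], so successive i iterations are ordered (the
"unsolved dependency" Clava reports); only the j-loop is safely parallel. */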
int main(int argc, char **argv) {
/*Retrieve problem size.*/
int m = 1000;
int n = 1200;
/*Variable declaration/allocation.*/
double alpha;
double (*A)[1000][1000];
A = (double (*)[1000][1000]) polybench_alloc_data((1000 + 0) * (1000 + 0), sizeof(double));
;
double (*B)[1000][1200];
B = (double (*)[1000][1200]) polybench_alloc_data((1000 + 0) * (1200 + 0), sizeof(double));
;
/*Initialize array(s).*/
init_array(m, n, &alpha, *A, *B);
/*Start timer.*/
;
/*Run kernel.*/
kernel_trmm(m, n, alpha, *A, *B);
/*Stop and print timer.*/
;
;
/*Prevent dead-code elimination. All live-out data must be printed
by the function call in argument.*/
if(argc > 42 && !strcmp(argv[0], "")) print_array(m, n, *B);
/*Be clean.*/
free((void *) A);
;
free((void *) B);
;
return 0;
}
|
lastprivatemissing-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
// x: not live-in, but live-out in the outer scope.
// Loop-carried output dependence: every iteration writes x, so the final
// value depends on which iteration runs last.
// Solution: the loop can be parallelized using lastprivate(x).
//
// Semantics of lastprivate(x): the corresponding original list item is
// updated after the end of the region; the compiler/runtime copies the
// private value from the sequentially last iteration back to the shared x.
// Without lastprivate(x), there is a race condition on x.
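//
// A race-free variant, for illustration only (not part of this benchmark):
//
// #pragma omp parallel for private(i) lastprivate(x)
// for (i = 0; i < len; i++)
// x = i; // x from the sequentially last iteration (len-1) is copied back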
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char* argv[])
{
int i,x;
int len = 10000;
if (argc>1)
len = atoi(argv[1]);
#pragma omp parallel for private (i)
for (i=0;i<len;i++)
x=i;
printf("x=%d",x);
return 0;
}
|
pprefix.c | #include "omp.h"
#include <math.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include "pprefix.h"
typedef struct index_t_container
{
index_t data;
} data_t;
typedef void* generic_p;
#define SIZE 10000000
index_t *prefix_sum(index_t *x, index_t n)
{
//prefix sum happens _in place_. make sure not to free the array twice
index_t *t = malloc(sizeof(index_t) * n);
index_t i,j;
for (j = 0; j < log2(n); j++)
{
#pragma omp parallel private(i) //TODO: implement better
{
#pragma omp for
for (i = 1 << j; i < n; i++)
t[i] = x[i] + x[i - (1 << j)];
#pragma omp for
for (i = 1 << j; i < n; i++)
x[i] = t[i];
}
}
free(t);
return x;
}
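/* Worked example (illustrative): x = {1, 2, 3, 4}
pass j=0 adds the neighbor 1 away: {1, 3, 5, 7}
pass j=1 adds the neighbor 2 away: {1, 3, 6, 10}
i.e. an inclusive scan in ceil(log2(n)) parallel passes, O(n log n) adds. */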
filter_ret_t filter(generic_p *array, index_t length, predicate p)
{
index_t *bitmap, *bitsum;
generic_p *filtered;
bitmap = malloc(sizeof(index_t) * length);
#pragma omp parallel for
for(index_t i=0;i<length;i++)
{
bitmap[i] = p(array[i]);
}
bitsum = prefix_sum(bitmap, length);
index_t filtered_length = bitsum[length - 1];
filtered = malloc(sizeof(generic_p) * filtered_length);
if(bitsum[0] > 0) // edge case: the first element passed the predicate
{
filtered[0] = array[0];
}
#pragma omp parallel for
for (index_t i = 1; i < length; i++)
{
if(bitsum[i] > bitsum[i-1])
{
filtered[bitsum[i]-1] = array[i];
}
}
free(bitsum);
filter_ret_t ret = {filtered, filtered_length};
return ret;
}
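/* Worked example (illustrative): array = {1,2,3,4,5} with p = even
bitmap = {0,1,0,1,0} -> bitsum = {0,1,1,2,2}, filtered_length = 2;
element i is kept at slot bitsum[i]-1 whenever bitsum[i] > bitsum[i-1],
giving filtered = {2, 4}. */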
bool even(generic_p a)
{
data_t* value = (data_t*) a;
return (value->data) % 2 == 0;
}
void test_psum()
{
bool assert_psum = true;
index_t *a = malloc(sizeof(index_t) * SIZE);
for (long i = 0; i < SIZE; i++)
a[i] = i + 1;
index_t *a_psum = prefix_sum(a, SIZE);
#pragma omp parallel for reduction(&: assert_psum)
for (index_t i = 1; i <= SIZE; i++)
{
index_t expected = (i * (i + 1)) / 2;
bool current_test = (a_psum[i - 1] == expected);
if (!current_test)
{
printf("test %d failed. expected %d, actual %d\n", i, expected, a_psum[i]);
}
assert_psum = assert_psum && current_test;
}
printf("assert_psum=%s\n", assert_psum ? "True" : "False");
free(a_psum);
}
void test_filter()
{
data_t **a = malloc(sizeof(data_t *) * SIZE);
for (index_t i = 0; i < SIZE; i++)
{
a[i] = malloc(sizeof(data_t));
a[i]->data = i + 1;
}
filter_ret_t a_filtered = filter((generic_p *)a, SIZE, even);
data_t **a_filterd_array = a_filtered.filtered_array;
index_t filtered_length = a_filtered.filtered_array_len;
bool assert_filter = true;
#pragma omp parallel for reduction(&& : assert_filter)
for (index_t i = 0; i < filtered_length; i++)
{
data_t expected = {2 * (i+1)};
data_t *actual = a_filterd_array[i];
bool current_test = actual->data == expected.data;
if (!current_test)
{
printf("test %d failed. expected %d, actual %d\n", i, expected.data, actual->data);
}
assert_filter = assert_filter && current_test;
}
printf("assert_filter=%s\n", assert_filter ? "True" : "False");
free(a_filterd_array);
for (index_t i = 0; i < SIZE; i++)
{
free(a[i]);
}
free(a);
}
// int main()
// {
// test_psum();
// test_filter();
// } |
remarks_parallel_in_multiple_target_state_machines.c | // RUN: %clang_cc1 -verify=host -Rpass=openmp-opt -Rpass-analysis=openmp-opt -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify=all,safe -Rpass=openmp-opt -Rpass-analysis=openmp-opt -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out
// host-no-diagnostics
void baz(void) __attribute__((assume("omp_no_openmp")));
void bar1(void) {
#pragma omp parallel // #0
// safe-remark@#0 {{Parallel region is used in unknown ways. Will not attempt to rewrite the state machine. [OMP101]}}
{
}
}
void bar2(void) {
#pragma omp parallel // #1
// safe-remark@#1 {{Parallel region is used in unknown ways. Will not attempt to rewrite the state machine. [OMP101]}}
{
}
}
void foo1(void) {
#pragma omp target teams // #2
// all-remark@#2 {{Rewriting generic-mode kernel with a customized state machine. [OMP131]}}
{
baz(); // all-remark {{Value has potential side effects preventing SPMD-mode execution. Add `__attribute__((assume("ompx_spmd_amenable")))` to the called function to override. [OMP121]}}
#pragma omp parallel // #3
{
}
bar1();
#pragma omp parallel // #4
{
}
}
}
void foo2(void) {
#pragma omp target teams // #5
// all-remark@#5 {{Rewriting generic-mode kernel with a customized state machine. [OMP131]}}
{
baz(); // all-remark {{Value has potential side effects preventing SPMD-mode execution. Add `__attribute__((assume("ompx_spmd_amenable")))` to the called function to override. [OMP121]}}
#pragma omp parallel // #6
{
}
bar1();
bar2();
#pragma omp parallel // #7
{
}
bar1();
bar2();
}
}
void foo3(void) {
#pragma omp target teams // #8
// all-remark@#8 {{Rewriting generic-mode kernel with a customized state machine. [OMP131]}}
{
baz(); // all-remark {{Value has potential side effects preventing SPMD-mode execution. Add `__attribute__((assume("ompx_spmd_amenable")))` to the called function to override. [OMP121]}}
#pragma omp parallel // #9
{
}
bar1();
bar2();
#pragma omp parallel // #10
{
}
bar1();
bar2();
}
}
void spmd(void) {
// Verify we do not emit the remarks above for "SPMD" regions.
#pragma omp target teams
#pragma omp parallel
{
}
#pragma omp target teams distribute parallel for
for (int i = 0; i < 100; ++i) {
}
}
// all-remark@* 9 {{OpenMP runtime call __kmpc_global_thread_num deduplicated. [OMP170]}}
|
prepress.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR EEEEE PPPP RRRR EEEEE SSSSS SSSSS %
% P P R R E P P R R E SS SS %
% PPPP RRRR EEE PPPP RRRR EEE SSS SSS %
% P R R E P R R E SS SS %
% P R R EEEEE P R R EEEEE SSSSS SSSSS %
% %
% %
% MagickCore Prepress Methods %
% %
% Software Design %
% John Cristy %
% October 2001 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-view.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/prepress.h"
#include "magick/registry.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T o t a l I n k D e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageTotalInkDensity() returns the total ink density for a CMYK image.
% Total Ink Density (TID) is determined by adding the CMYK values in the
% darkest shadow area in an image.
%
% The format of the GetImageTotalInkDensity method is:
%
% double GetImageTotalInkDensity(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport double GetImageTotalInkDensity(Image *image)
{
CacheView
*image_view;
double
total_ink_density;
ExceptionInfo
*exception;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickSignature);
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ImageError,"ColorSeparatedImageRequired","`%s'",image->filename);
return(0.0);
}
status=MagickTrue;
total_ink_density=0.0;
exception=(&image->exception);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
density;
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
density=(double) GetPixelRed(p)+GetPixelGreen(p)+
GetPixelBlue(p)+GetPixelIndex(indexes+x);
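/*
Double-checked update: the unsynchronized test below cheaply filters
out most pixels; it is repeated inside the critical section because
another thread may have raised total_ink_density in the meantime.
*/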
if (density > total_ink_density)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetImageTotalInkDensity)
#endif
{
if (density > total_ink_density)
total_ink_density=density;
}
p++;
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
total_ink_density=0.0;
return(total_ink_density);
}
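/*
Usage sketch (illustrative; assumes a CMYK Image *image already read in):

double tid = GetImageTotalInkDensity(image);
(void) fprintf(stdout,"TID: %.2f%%\n",100.0*tid/QuantumRange);

Dividing by QuantumRange expresses the density as a percentage of one
full channel, the form (up to 400%) in which prepress TID limits are
usually quoted.
*/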
|
jacobi_omp.c | /*
* Copyright (c) 2008, BSC (Barcelon Supercomputing Center)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BSC ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
#include <time.h>
#define NB 64
#define B 64
#define FALSE (0)
#define TRUE (1)
typedef double fp_type;
typedef fp_type *vin;
typedef fp_type *vout;
typedef fp_type *bin;
typedef fp_type *binout;
fp_type *A[NB][NB];
fp_type *A_new[NB][NB];
fp_type *tmp[NB][NB];
void alloc_and_genmat()
{
int init_val, i, j, ii, jj;
fp_type *p, *p_new;
init_val = 1325;
for (ii = 0; ii < NB; ii++)
{
for (jj = 0; jj < NB; jj++)
{
A[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
A_new[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
tmp[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
if (A[ii][jj] == NULL || A_new[ii][jj] == NULL || tmp[ii][jj] == NULL)
{
printf("Out of memory\n");
exit(1);
}
p = A[ii][jj];
p_new = A_new[ii][jj];
for (i = 0; i < B; i++)
{
for (j = 0; j < B; j++)
{
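          /* Lehmer-style multiplicative congruential generator mod 2^16;
             the scaling below maps the state into the range [-2, 2). */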
init_val = (3125 * init_val) % 65536;
(*p) = (fp_type)((init_val - 32768.0) / 16384.0);
(*p_new) = (*p);
p++;
p_new++;
}
}
}
}
}
long usecs(void)
{
struct timeval t;
gettimeofday(&t, NULL);
return t.tv_sec * 1000000 + t.tv_usec;
}
void clear(vout v)
{
int i;
for (i = 0; i < B; i++)
v[i] = (fp_type)0.0;
}
void getlastrow(bin A, vout v)
{
int j;
for (j = 0; j < B; j++)
v[j] = A[(B - 1) * B + j];
}
void getlastcol(bin A, vout v)
{
int i;
for (i = 0; i < B; i++)
v[i] = A[i * B + B - 1];
}
void getfirstrow(bin A, vout v)
{
int j;
for (j = 0; j < B; j++)
v[j] = A[0 * B + j];
}
void getfirstcol(bin A, vout v)
{
int i;
for (i = 0; i < B; i++)
v[i] = A[i * B + 0];
}
void jacobi(vin lefthalo, vin tophalo, vin righthalo, vin bottomhalo, bin A, binout A_new)
{
int i, j;
fp_type left, top, right, bottom;
for (i = 0; (i < B); i++)
{
for (j = 0; j < B; j++)
{
/* Halos are indexed along their running coordinate: the row halos
   (top/bottom) by column j, the column halos (left/right) by row i. */
left = (j == 0 ? lefthalo[i] : A[i * B + j - 1]);
top = (i == 0 ? tophalo[j] : A[(i - 1) * B + j]);
right = (j == B - 1 ? righthalo[i] : A[i * B + j + 1]);
bottom = (i == B - 1 ? bottomhalo[j] : A[(i + 1) * B + j]);
A_new[i * B + j] = 0.2 * (A[i * B + j] + left + top + right + bottom);
}
}
}
double maxdelta()
{
double dmax = -__DBL_MAX__;
int ii, jj, i, j;
#pragma omp parallel for schedule(static) private(jj, i, j) reduction(max: dmax)
for (ii = 0; ii < NB; ii++)
{
for (jj = 0; jj < NB; jj++)
{
for (i = 0; (i < B); i++)
{
for (j = 0; j < B; j++)
{
double diff = fabs(A_new[ii][jj][i * B + j] - A[ii][jj][i * B + j]);
if(diff > dmax) dmax = diff;
}
}
}
}
return dmax;
}
void compute(int niters)
{
int iters;
int ii, jj;
fp_type lefthalo[B], tophalo[B], righthalo[B], bottomhalo[B];
double delta = 2.0;
double epsilon = 1e-7;
iters = 0;
// for (iters = 0; iters < niters; iters++)
while(iters < niters)
{
++iters;
#pragma omp parallel \
private(ii, jj, lefthalo, tophalo, righthalo, bottomhalo) \
shared(A, A_new)
{
#pragma omp for schedule(static)
for (ii = 0; ii < NB; ii++)
{
for (jj = 0; jj < NB; jj++)
{
if (ii > 0)
getlastrow(A[ii - 1][jj], tophalo);
else
clear(tophalo);
if (jj > 0)
getlastcol(A[ii][jj - 1], lefthalo);
else
clear(lefthalo);
if (ii < NB - 1)
getfirstrow(A[ii + 1][jj], bottomhalo);
else
clear(bottomhalo);
if (jj < NB - 1)
getfirstcol(A[ii][jj + 1], righthalo);
else
clear(righthalo);
jacobi(lefthalo, tophalo, righthalo, bottomhalo, A[ii][jj], A_new[ii][jj]);
} // jj
} // ii
} // end parallel
delta = maxdelta();
printf("iteration %d: delta = %e\n", iters, delta);
// Yes, this is an inefficient full copy; however, the library version
// requires copying each component block this way to avoid a segmentation
// fault. A pointer-swap alternative is sketched after compute().
#pragma omp parallel for schedule(static) shared(A, A_new)
for(int i = 0; i < NB; ++i)
{
for(int j = 0; j < NB; ++j)
{
for(int k = 0; k < B; ++k)
for(int l = 0; l < B; ++l)
A[i][j][k * B + l] = A_new[i][j][k * B + l];
}
}
} // iter
}
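/*
 * A minimal sketch (not part of the original program) of the pointer-swap
 * alternative to the copy loop above: exchanging the block pointers of A and
 * A_new after each sweep costs O(NB*NB) pointer assignments instead of
 * O(NB*NB*B*B) element copies.
 */
#if 0
static void swap_grids(void)
{
	int ii, jj;
	for (ii = 0; ii < NB; ii++)
		for (jj = 0; jj < NB; jj++)
		{
			fp_type *t = A[ii][jj]; /* swap pointers, not data */
			A[ii][jj] = A_new[ii][jj];
			A_new[ii][jj] = t;
		}
}
#endif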
int main(int argc, char *argv[])
{
int niters;
// pp_time_t tm;
// memset( &tm, 0, sizeof(tm) );
struct timespec start, end;
if (argc > 1)
{
niters = atoi(argv[1]);
}
else
niters = 1;
alloc_and_genmat();
clock_gettime(CLOCK_MONOTONIC, &start);
compute(niters);
clock_gettime(CLOCK_MONOTONIC, &end);
double time_taken = (end.tv_sec - start.tv_sec) * 1e9;
time_taken = (time_taken + (end.tv_nsec - start.tv_nsec)) * 1e-9;
printf("Running time = %g %s\n", time_taken, "s");
/* FILE *outFile;
outFile = fopen("./jacobi_omp_values.txt", "w");
if (outFile == NULL)
{
fprintf(stderr, "Error writing to file\n");
}
else
{
int ii, jj, i, j;
for (ii = 0; ii < NB; ++ii)
for (jj = 0; jj < NB; ++jj)
for (i = 0; i < B; ++i)
for (j = 0; j < B; ++j)
fprintf(outFile, "%.15f\n", A[ii][jj][i * B + j]);
fclose(outFile);
} */
return 0;
} |
shear.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS H H EEEEE AAA RRRR %
% SS H H E A A R R %
% SSS HHHHH EEE AAAAA RRRR %
% SS H H E A A R R %
% SSSSS H H EEEEE A A R R %
% %
% %
% MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The XShearImage() and YShearImage() methods are based on the paper "A Fast
% Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics
% Interface '86 (Vancouver). ShearRotateImage() is adapted from a similar
% method based on the Paeth paper written by Michael Halle of the Spatial
% Imaging Group, MIT Media Lab.
%
*/
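/*
  A brief sketch of the identity behind the Paeth method (for reference; the
  shear factors appear below in ShearRotateImage()): a rotation by angle t
  factors into three shears,

    R(t) = X(-tan(t/2)) . Y(sin(t)) . X(-tan(t/2)),

  where X(a) = [1 a; 0 1] shears rows along the X axis and Y(b) = [1 0; b 1]
  shears columns along the Y axis. Each shear only displaces whole rows or
  columns, so the image can be rotated with three cheap 1-D passes.
*/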
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/shear.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C r o p T o F i t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropToFitImage() crops the sheared image as determined by the bounding box
% as defined by width and height and shearing angles.
%
% The format of the CropToFitImage method is:
%
% MagickBooleanType CropToFitImage(Image **image,
% const double x_shear,const double y_shear,
% const double width,const double height,
% const MagickBooleanType rotate,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CropToFitImage(Image **image,
const double x_shear,const double y_shear,
const double width,const double height,
const MagickBooleanType rotate,ExceptionInfo *exception)
{
Image
*crop_image;
PointInfo
extent[4],
min,
max;
RectangleInfo
geometry,
page;
register ssize_t
i;
/*
Calculate the rotated image size.
*/
extent[0].x=(double) (-width/2.0);
extent[0].y=(double) (-height/2.0);
extent[1].x=(double) width/2.0;
extent[1].y=(double) (-height/2.0);
extent[2].x=(double) (-width/2.0);
extent[2].y=(double) height/2.0;
extent[3].x=(double) width/2.0;
extent[3].y=(double) height/2.0;
for (i=0; i < 4; i++)
{
extent[i].x+=x_shear*extent[i].y;
extent[i].y+=y_shear*extent[i].x;
if (rotate != MagickFalse)
extent[i].x+=x_shear*extent[i].y;
extent[i].x+=(double) (*image)->columns/2.0;
extent[i].y+=(double) (*image)->rows/2.0;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
geometry.x=(ssize_t) ceil(min.x-0.5);
geometry.y=(ssize_t) ceil(min.y-0.5);
geometry.width=(size_t) floor(max.x-min.x+0.5);
geometry.height=(size_t) floor(max.y-min.y+0.5);
page=(*image)->page;
(void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
crop_image=CropImage(*image,&geometry,exception);
if (crop_image == (Image *) NULL)
return(MagickFalse);
crop_image->page=page;
*image=DestroyImage(*image);
*image=crop_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s k e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeskewImage() removes skew from the image. Skew is an artifact that
% occurs in scanned images because of the camera being misaligned,
% imperfections in the scanning or surface, or simply because the paper was
% not placed completely flat when scanned.
%
% The result will be auto-cropped if the artifact "deskew:auto-crop" is
% defined, while the amount the image is to be deskewed, in degrees, is
% also saved as the artifact "deskew:angle".
%
% The format of the DeskewImage method is:
%
% Image *DeskewImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: separate background from foreground.
%
% o exception: return any errors or warnings in this structure.
%
*/
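/*
  A sketch of the approach (inferred from the code below): the image is
  thresholded into a bitmap, Radon-style projections are accumulated over a
  range of skew offsets, and the chosen skew is the one maximizing

    score = sum_y (P(y) - P(y+1))^2,

  the sum of squared differences between adjacent projection bins, since a
  correctly aligned document yields the sharpest projection profile.
*/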
static void RadonProjection(const Image *image,MatrixInfo *source_matrixs,
MatrixInfo *destination_matrixs,const ssize_t sign,size_t *projection)
{
MatrixInfo
*swap;
register MatrixInfo
*p,
*q;
register ssize_t
x;
size_t
step;
p=source_matrixs;
q=destination_matrixs;
for (step=1; step < GetMatrixColumns(p); step*=2)
{
for (x=0; x < (ssize_t) GetMatrixColumns(p); x+=2*(ssize_t) step)
{
register ssize_t
i;
ssize_t
y;
unsigned short
element,
neighbor;
for (i=0; i < (ssize_t) step; i++)
{
for (y=0; y < (ssize_t) (GetMatrixRows(p)-i-1); y++)
{
if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
continue;
if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
continue;
neighbor+=element;
if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
continue;
if (GetMatrixElement(p,x+i+step,y+i+1,&neighbor) == MagickFalse)
continue;
neighbor+=element;
if (SetMatrixElement(q,x+2*i+1,y,&neighbor) == MagickFalse)
continue;
}
for ( ; y < (ssize_t) (GetMatrixRows(p)-i); y++)
{
if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
continue;
if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
continue;
neighbor+=element;
if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
continue;
if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
continue;
}
for ( ; y < (ssize_t) GetMatrixRows(p); y++)
{
if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
continue;
if (SetMatrixElement(q,x+2*i,y,&element) == MagickFalse)
continue;
if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
continue;
}
}
}
swap=p;
p=q;
q=swap;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,GetMatrixColumns(p),1)
#endif
for (x=0; x < (ssize_t) GetMatrixColumns(p); x++)
{
register ssize_t
y;
size_t
sum;
sum=0;
for (y=0; y < (ssize_t) (GetMatrixRows(p)-1); y++)
{
ssize_t
delta;
unsigned short
element,
neighbor;
if (GetMatrixElement(p,x,y,&element) == MagickFalse)
continue;
if (GetMatrixElement(p,x,y+1,&neighbor) == MagickFalse)
continue;
delta=(ssize_t) element-(ssize_t) neighbor;
sum+=delta*delta;
}
projection[GetMatrixColumns(p)+sign*x-1]=sum;
}
}
static MagickBooleanType RadonTransform(const Image *image,
const double threshold,size_t *projection,ExceptionInfo *exception)
{
CacheView
*image_view;
MatrixInfo
*destination_matrixs,
*source_matrixs;
MagickBooleanType
status;
size_t
count,
width;
ssize_t
j,
y;
unsigned char
c;
unsigned short
bits[256];
for (width=1; width < ((image->columns+7)/8); width<<=1) ;
source_matrixs=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
exception);
destination_matrixs=AcquireMatrixInfo(width,image->rows,
sizeof(unsigned short),exception);
if ((source_matrixs == (MatrixInfo *) NULL) ||
(destination_matrixs == (MatrixInfo *) NULL))
{
if (destination_matrixs != (MatrixInfo *) NULL)
destination_matrixs=DestroyMatrixInfo(destination_matrixs);
if (source_matrixs != (MatrixInfo *) NULL)
source_matrixs=DestroyMatrixInfo(source_matrixs);
return(MagickFalse);
}
if (NullMatrix(source_matrixs) == MagickFalse)
{
destination_matrixs=DestroyMatrixInfo(destination_matrixs);
source_matrixs=DestroyMatrixInfo(source_matrixs);
return(MagickFalse);
}
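  /*
    Precompute an 8-bit population-count table: bits[j] holds the number of
    set bits in byte j, so thresholded pixels can be tallied a byte at a time.
  */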
for (j=0; j < 256; j++)
{
c=(unsigned char) j;
for (count=0; c != 0; c>>=1)
count+=c & 0x01;
bits[j]=(unsigned short) count;
}
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
i,
x;
size_t
bit,
byte;
unsigned short
value;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
bit=0;
byte=0;
i=(ssize_t) (image->columns+7)/8;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
((MagickRealType) GetPixelGreen(image,p) < threshold) ||
((MagickRealType) GetPixelBlue(image,p) < threshold))
byte|=0x01;
bit++;
if (bit == 8)
{
value=bits[byte];
(void) SetMatrixElement(source_matrixs,--i,y,&value);
bit=0;
byte=0;
}
p+=GetPixelChannels(image);
}
if (bit != 0)
{
byte<<=(8-bit);
value=bits[byte];
(void) SetMatrixElement(source_matrixs,--i,y,&value);
}
}
RadonProjection(image,source_matrixs,destination_matrixs,-1,projection);
(void) NullMatrix(source_matrixs);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
i,
x;
size_t
bit,
byte;
unsigned short
value;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
bit=0;
byte=0;
i=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
((MagickRealType) GetPixelGreen(image,p) < threshold) ||
((MagickRealType) GetPixelBlue(image,p) < threshold))
byte|=0x01;
bit++;
if (bit == 8)
{
value=bits[byte];
(void) SetMatrixElement(source_matrixs,i++,y,&value);
bit=0;
byte=0;
}
p+=GetPixelChannels(image);
}
if (bit != 0)
{
byte<<=(8-bit);
value=bits[byte];
(void) SetMatrixElement(source_matrixs,i++,y,&value);
}
}
RadonProjection(image,source_matrixs,destination_matrixs,1,projection);
image_view=DestroyCacheView(image_view);
destination_matrixs=DestroyMatrixInfo(destination_matrixs);
source_matrixs=DestroyMatrixInfo(source_matrixs);
return(MagickTrue);
}
static void GetImageBackgroundColor(Image *image,const ssize_t offset,
ExceptionInfo *exception)
{
CacheView
*image_view;
PixelInfo
background;
double
count;
ssize_t
y;
/*
Compute average background color.
*/
if (offset <= 0)
return;
GetPixelInfo(image,&background);
count=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if ((y >= offset) && (y < ((ssize_t) image->rows-offset)))
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x >= offset) && (x < ((ssize_t) image->columns-offset)))
continue;
background.red+=QuantumScale*GetPixelRed(image,p);
background.green+=QuantumScale*GetPixelGreen(image,p);
background.blue+=QuantumScale*GetPixelBlue(image,p);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
background.alpha+=QuantumScale*GetPixelAlpha(image,p);
count++;
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
image->background_color.red=(double) ClampToQuantum(QuantumRange*
background.red/count);
image->background_color.green=(double) ClampToQuantum(QuantumRange*
background.green/count);
image->background_color.blue=(double) ClampToQuantum(QuantumRange*
background.blue/count);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->background_color.alpha=(double) ClampToQuantum(QuantumRange*
background.alpha/count);
}
MagickExport Image *DeskewImage(const Image *image,const double threshold,
ExceptionInfo *exception)
{
AffineMatrix
affine_matrix;
const char
*artifact;
double
degrees;
Image
*clone_image,
*crop_image,
*deskew_image,
*median_image;
MagickBooleanType
status;
RectangleInfo
geometry;
register ssize_t
i;
size_t
max_projection,
*projection,
width;
ssize_t
skew;
/*
Compute deskew angle.
*/
for (width=1; width < ((image->columns+7)/8); width<<=1) ;
projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1),
sizeof(*projection));
if (projection == (size_t *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
status=RadonTransform(image,threshold,projection,exception);
if (status == MagickFalse)
{
projection=(size_t *) RelinquishMagickMemory(projection);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
max_projection=0;
skew=0;
for (i=0; i < (ssize_t) (2*width-1); i++)
{
if (projection[i] > max_projection)
{
skew=i-(ssize_t) width+1;
max_projection=projection[i];
}
}
projection=(size_t *) RelinquishMagickMemory(projection);
degrees=RadiansToDegrees(-atan((double) skew/width/8));
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" Deskew angle: %g",degrees);
/*
Deskew image.
*/
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
{
char
angle[MagickPathExtent];
(void) FormatLocaleString(angle,MagickPathExtent,"%.20g",degrees);
(void) SetImageArtifact(clone_image,"deskew:angle",angle);
}
(void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod,
exception);
affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0))));
affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.tx=0.0;
affine_matrix.ty=0.0;
artifact=GetImageArtifact(image,"deskew:auto-crop");
if (IsStringTrue(artifact) == MagickFalse)
{
deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
clone_image=DestroyImage(clone_image);
return(deskew_image);
}
/*
Auto-crop image.
*/
GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact),
exception);
deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
clone_image=DestroyImage(clone_image);
if (deskew_image == (Image *) NULL)
return((Image *) NULL);
median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception);
if (median_image == (Image *) NULL)
{
deskew_image=DestroyImage(deskew_image);
return((Image *) NULL);
}
geometry=GetImageBoundingBox(median_image,exception);
median_image=DestroyImage(median_image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule()," Deskew geometry: "
"%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
geometry.height,(double) geometry.x,(double) geometry.y);
crop_image=CropImage(deskew_image,&geometry,exception);
deskew_image=DestroyImage(deskew_image);
return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e g r a l R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IntegralRotateImage() rotates the image an integral of 90 degrees. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the rotated image.
%
% The format of the IntegralRotateImage method is:
%
% Image *IntegralRotateImage(const Image *image,size_t rotations,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o rotations: Specifies the number of 90 degree rotations.
%
*/
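/*
  The coordinate mappings implemented below, as a summary (with (x,y) a
  source pixel, W the source columns, and H the source rows):

    rotations=1 (90 degrees):   (x,y) -> (H-1-y, x),     destination is H x W
    rotations=2 (180 degrees):  (x,y) -> (W-1-x, H-1-y), destination is W x H
    rotations=3 (270 degrees):  (x,y) -> (y, W-1-x),     destination is H x W
*/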
MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations,
ExceptionInfo *exception)
{
#define RotateImageTag "Rotate/Image"
CacheView
*image_view,
*rotate_view;
Image
*rotate_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
/*
Initialize rotated image attributes.
*/
assert(image != (Image *) NULL);
page=image->page;
rotations%=4;
if (rotations == 0)
return(CloneImage(image,0,0,MagickTrue,exception));
if ((rotations == 1) || (rotations == 3))
rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
else
rotate_image=CloneImage(image,0,0,MagickTrue,
exception);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
/*
Integral rotate the image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
rotate_view=AcquireAuthenticCacheView(rotate_image,exception);
switch (rotations)
{
case 1:
{
size_t
tile_height,
tile_width;
ssize_t
tile_y;
/*
Rotate 90 degrees.
*/
GetPixelCacheTileSize(image,&tile_width,&tile_height);
tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows/tile_height,1)
#endif
for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
{
register ssize_t
tile_x;
if (status == MagickFalse)
continue;
tile_x=0;
for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
y;
size_t
height,
width;
width=tile_width;
if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
height=tile_height;
if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (y=0; y < (ssize_t) width; y++)
{
register const Quantum
*magick_restrict tile_pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t)
(rotate_image->columns-(tile_y+height)),y+tile_x,height,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
tile_pixels=p+((height-1)*width+y)*GetPixelChannels(image);
for (x=0; x < (ssize_t) height; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(rotate_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
}
tile_pixels-=width*GetPixelChannels(image);
q+=GetPixelChannels(rotate_image);
}
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_IntegralRotateImage)
#endif
proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
break;
}
case 2:
{
register ssize_t
y;
/*
Rotate 180 degrees.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y-
1),image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
q+=GetPixelChannels(rotate_image)*image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
q-=GetPixelChannels(rotate_image);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(rotate_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(rotate_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_IntegralRotateImage)
#endif
proceed=SetImageProgress(image,RotateImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
if (page.width != 0)
page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
break;
}
case 3:
{
size_t
tile_height,
tile_width;
ssize_t
tile_y;
/*
Rotate 270 degrees.
*/
GetPixelCacheTileSize(image,&tile_width,&tile_height);
tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows/tile_height,1)
#endif
for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
{
register ssize_t
tile_x;
if (status == MagickFalse)
continue;
tile_x=0;
for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
y;
size_t
height,
width;
width=tile_width;
if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
height=tile_height;
if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (y=0; y < (ssize_t) width; y++)
{
register const Quantum
*magick_restrict tile_pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+
rotate_image->rows-(tile_x+width)),height,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
tile_pixels=p+((width-1)-y)*GetPixelChannels(image);
for (x=0; x < (ssize_t) height; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(rotate_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
}
tile_pixels+=width*GetPixelChannels(image);
q+=GetPixelChannels(rotate_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_IntegralRotateImage)
#endif
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.height != 0)
page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
break;
}
default:
break;
}
rotate_view=DestroyCacheView(rotate_view);
image_view=DestroyCacheView(image_view);
rotate_image->type=image->type;
rotate_image->page=page;
if (status == MagickFalse)
rotate_image=DestroyImage(rotate_image);
return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ X S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% XShearImage() shears the image in the X direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a vertical
% Y-axis. X shears will widen an image creating 'empty' triangles on the left
% and right sides of the source image.
%
% The format of the XShearImage method is:
%
% MagickBooleanType XShearImage(Image *image,const double degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A double representing the shearing angle along the X
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
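/*
  A sketch of the per-row blend performed below: for row y the displacement is

    d = degrees*(y - height/2),  step = floor(|d|)+1,  area = |d| - floor(|d|),

  and each output pixel is an area-weighted blend of the two source pixels it
  straddles (via CompositePixelInfoAreaBlend), which anti-aliases the sheared
  edge instead of snapping to whole-pixel offsets.
*/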
static MagickBooleanType XShearImage(Image *image,const double degrees,
const size_t width,const size_t height,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define XShearImageTag "XShear/Image"
typedef enum
{
LEFT,
RIGHT
} ShearDirection;
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
background;
ssize_t
y;
/*
X shear image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
background=image->background_color;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,height,1)
#endif
for (y=0; y < (ssize_t) height; y++)
{
PixelInfo
pixel,
source,
destination;
double
area,
displacement;
register Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
i;
ShearDirection
direction;
ssize_t
step;
if (status == MagickFalse)
continue;
p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
exception);
if (p == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
p+=x_offset*GetPixelChannels(image);
displacement=degrees*(double) (y-height/2.0);
if (displacement == 0.0)
continue;
if (displacement > 0.0)
direction=RIGHT;
else
{
displacement*=(-1.0);
direction=LEFT;
}
step=(ssize_t) floor((double) displacement);
area=(double) (displacement-step);
step++;
pixel=background;
GetPixelInfo(image,&source);
GetPixelInfo(image,&destination);
switch (direction)
{
case LEFT:
{
/*
Transfer pixels left-to-right.
*/
if (step > x_offset)
break;
q=p-step*GetPixelChannels(image);
for (i=0; i < (ssize_t) width; i++)
{
if ((x_offset+i) < step)
{
p+=GetPixelChannels(image);
GetPixelInfoPixel(image,p,&pixel);
q+=GetPixelChannels(image);
continue;
}
GetPixelInfoPixel(image,p,&source);
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&source,(double) GetPixelAlpha(image,p),area,&destination);
SetPixelViaPixelInfo(image,&destination,q);
GetPixelInfoPixel(image,p,&pixel);
p+=GetPixelChannels(image);
q+=GetPixelChannels(image);
}
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&background,(double) background.alpha,area,&destination);
SetPixelViaPixelInfo(image,&destination,q);
q+=GetPixelChannels(image);
for (i=0; i < (step-1); i++)
{
SetPixelViaPixelInfo(image,&background,q);
q+=GetPixelChannels(image);
}
break;
}
case RIGHT:
{
/*
Transfer pixels right-to-left.
*/
p+=width*GetPixelChannels(image);
q=p+step*GetPixelChannels(image);
for (i=0; i < (ssize_t) width; i++)
{
p-=GetPixelChannels(image);
q-=GetPixelChannels(image);
if ((size_t) (x_offset+width+step-i) > image->columns)
continue;
GetPixelInfoPixel(image,p,&source);
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&source,(double) GetPixelAlpha(image,p),area,&destination);
SetPixelViaPixelInfo(image,&destination,q);
GetPixelInfoPixel(image,p,&pixel);
}
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&background,(double) background.alpha,area,&destination);
q-=GetPixelChannels(image);
SetPixelViaPixelInfo(image,&destination,q);
for (i=0; i < (step-1); i++)
{
q-=GetPixelChannels(image);
SetPixelViaPixelInfo(image,&background,q);
}
break;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_XShearImage)
#endif
proceed=SetImageProgress(image,XShearImageTag,progress++,height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Y S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% YShearImage shears the image in the Y direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a
% horizontal X-axis. Y shears will increase the height of an image creating
% 'empty' triangles on the top and bottom of the source image.
%
% The format of the YShearImage method is:
%
% MagickBooleanType YShearImage(Image *image,const double degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A double representing the shearing angle along the Y
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType YShearImage(Image *image,const double degrees,
const size_t width,const size_t height,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define YShearImageTag "YShear/Image"
typedef enum
{
UP,
DOWN
} ShearDirection;
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
background;
ssize_t
x;
/*
Y Shear image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
progress=0;
background=image->background_color;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,width,1)
#endif
for (x=0; x < (ssize_t) width; x++)
{
ssize_t
step;
double
area,
displacement;
PixelInfo
pixel,
source,
destination;
register Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
i;
ShearDirection
direction;
if (status == MagickFalse)
continue;
p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows,
exception);
if (p == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
p+=y_offset*GetPixelChannels(image);
displacement=degrees*(double) (x-width/2.0);
if (displacement == 0.0)
continue;
if (displacement > 0.0)
direction=DOWN;
else
{
displacement*=(-1.0);
direction=UP;
}
step=(ssize_t) floor((double) displacement);
area=(double) (displacement-step);
step++;
pixel=background;
GetPixelInfo(image,&source);
GetPixelInfo(image,&destination);
switch (direction)
{
case UP:
{
/*
Transfer pixels top-to-bottom.
*/
if (step > y_offset)
break;
q=p-step*GetPixelChannels(image);
for (i=0; i < (ssize_t) height; i++)
{
if ((y_offset+i) < step)
{
p+=GetPixelChannels(image);
GetPixelInfoPixel(image,p,&pixel);
q+=GetPixelChannels(image);
continue;
}
GetPixelInfoPixel(image,p,&source);
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&source,(double) GetPixelAlpha(image,p),area,
&destination);
SetPixelViaPixelInfo(image,&destination,q);
GetPixelInfoPixel(image,p,&pixel);
p+=GetPixelChannels(image);
q+=GetPixelChannels(image);
}
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&background,(double) background.alpha,area,&destination);
SetPixelViaPixelInfo(image,&destination,q);
q+=GetPixelChannels(image);
for (i=0; i < (step-1); i++)
{
SetPixelViaPixelInfo(image,&background,q);
q+=GetPixelChannels(image);
}
break;
}
case DOWN:
{
/*
Transfer pixels bottom-to-top.
*/
p+=height*GetPixelChannels(image);
q=p+step*GetPixelChannels(image);
for (i=0; i < (ssize_t) height; i++)
{
p-=GetPixelChannels(image);
q-=GetPixelChannels(image);
if ((size_t) (y_offset+height+step-i) > image->rows)
continue;
GetPixelInfoPixel(image,p,&source);
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&source,(double) GetPixelAlpha(image,p),area,
&destination);
SetPixelViaPixelInfo(image,&destination,q);
GetPixelInfoPixel(image,p,&pixel);
}
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&background,(double) background.alpha,area,&destination);
q-=GetPixelChannels(image);
SetPixelViaPixelInfo(image,&destination,q);
for (i=0; i < (step-1); i++)
{
q-=GetPixelChannels(image);
SetPixelViaPixelInfo(image,&background,q);
}
break;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_YShearImage)
#endif
proceed=SetImageProgress(image,YShearImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearImage() creates a new image that is a shear_image copy of an existing
% one. Shearing slides one edge of an image along the X or Y axis, creating
% a parallelogram. An X direction shear slides an edge along the X axis,
% while a Y direction shear slides an edge along the Y axis. The amount of
% the shear is controlled by a shear angle. For X direction shears, x_shear
% is measured relative to the Y axis, and similarly, for Y direction shears
% y_shear is measured relative to the X axis. Empty triangles left over from
% shearing the image are filled with the background color defined by member
% 'background_color' of the image. ShearImage() allocates the memory
% necessary for the new Image structure and returns a pointer to the new image.
%
% ShearImage() is based on the paper "A Fast Algorithm for General Raster
% Rotatation" by Alan W. Paeth.
%
% The format of the ShearImage method is:
%
% Image *ShearImage(const Image *image,const double x_shear,
% const double y_shear,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear: Specifies the number of degrees to shear the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearImage(const Image *image,const double x_shear,
const double y_shear,ExceptionInfo *exception)
{
Image
*integral_image,
*shear_image;
MagickBooleanType
status;
PointInfo
shear;
RectangleInfo
border_info,
bounds;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0))
ThrowImageException(ImageError,"AngleIsDiscontinuous");
if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0))
ThrowImageException(ImageError,"AngleIsDiscontinuous");
/*
Initialize shear angle.
*/
integral_image=CloneImage(image,0,0,MagickTrue,exception);
if (integral_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0))));
shear.y=tan(DegreesToRadians(fmod(y_shear,360.0)));
if ((shear.x == 0.0) && (shear.y == 0.0))
return(integral_image);
if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
{
integral_image=DestroyImage(integral_image);
return(integral_image);
}
if (integral_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
/*
Compute image size.
*/
bounds.width=image->columns+(ssize_t) floor(fabs(shear.x)*image->rows+0.5);
bounds.x=(ssize_t) ceil((double) image->columns+((fabs(shear.x)*image->rows)-
image->columns)/2.0-0.5);
bounds.y=(ssize_t) ceil((double) image->rows+((fabs(shear.y)*bounds.width)-
image->rows)/2.0-0.5);
/*
Surround image with border.
*/
integral_image->border_color=integral_image->background_color;
integral_image->compose=CopyCompositeOp;
border_info.width=(size_t) bounds.x;
border_info.height=(size_t) bounds.y;
shear_image=BorderImage(integral_image,&border_info,image->compose,exception);
integral_image=DestroyImage(integral_image);
if (shear_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
/*
Shear the image.
*/
if (shear_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel,exception);
status=XShearImage(shear_image,shear.x,image->columns,image->rows,bounds.x,
(ssize_t) (shear_image->rows-image->rows)/2,exception);
if (status == MagickFalse)
{
shear_image=DestroyImage(shear_image);
return((Image *) NULL);
}
status=YShearImage(shear_image,shear.y,bounds.width,image->rows,(ssize_t)
(shear_image->columns-bounds.width)/2,bounds.y,exception);
if (status == MagickFalse)
{
shear_image=DestroyImage(shear_image);
return((Image *) NULL);
}
status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType)
image->columns,(MagickRealType) image->rows,MagickFalse,exception);
shear_image->alpha_trait=image->alpha_trait;
shear_image->compose=image->compose;
shear_image->page.width=0;
shear_image->page.height=0;
if (status == MagickFalse)
shear_image=DestroyImage(shear_image);
return(shear_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearRotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. ShearRotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% ShearRotateImage() is based on the paper "A Fast Algorithm for General
% Raster Rotatation" by Alan W. Paeth. ShearRotateImage is adapted from a
% similar method based on the Paeth paper written by Michael Halle of the
% Spatial Imaging Group, MIT Media Lab.
%
% The format of the ShearRotateImage method is:
%
% Image *ShearRotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearRotateImage(const Image *image,const double degrees,
ExceptionInfo *exception)
{
Image
*integral_image,
*rotate_image;
MagickBooleanType
status;
MagickRealType
angle;
PointInfo
shear;
RectangleInfo
border_info,
bounds;
size_t
height,
rotations,
shear_width,
width;
/*
Adjust rotation angle.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
angle=fmod(degrees,360.0);
if (angle < -45.0)
angle+=360.0;
for (rotations=0; angle > 45.0; rotations++)
angle-=90.0;
rotations%=4;
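  /*
    The loop above leaves angle in [-45,45] and rotations in {0,1,2,3}; for
    example, degrees=190 yields rotations=2 (an integral 180 degree turn)
    followed by a residual shear rotation of angle=10.
  */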
/*
Calculate shear equations.
*/
integral_image=IntegralRotateImage(image,rotations,exception);
if (integral_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
shear.y=sin((double) DegreesToRadians(angle));
if ((shear.x == 0.0) && (shear.y == 0.0))
return(integral_image);
if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
{
integral_image=DestroyImage(integral_image);
return(integral_image);
}
if (integral_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
/*
Compute maximum bounds for 3 shear operations.
*/
width=integral_image->columns;
height=integral_image->rows;
bounds.width=(size_t) floor(fabs((double) height*shear.x)+width+0.5);
bounds.height=(size_t) floor(fabs((double) bounds.width*shear.y)+height+0.5);
shear_width=(size_t) floor(fabs((double) bounds.height*shear.x)+
bounds.width+0.5);
bounds.x=(ssize_t) floor((double) ((shear_width > bounds.width) ? width :
bounds.width-shear_width+2)/2.0+0.5);
bounds.y=(ssize_t) floor(((double) bounds.height-height+2)/2.0+0.5);
/*
Surround image with a border.
*/
integral_image->border_color=integral_image->background_color;
integral_image->compose=CopyCompositeOp;
border_info.width=(size_t) bounds.x;
border_info.height=(size_t) bounds.y;
rotate_image=BorderImage(integral_image,&border_info,image->compose,
exception);
integral_image=DestroyImage(integral_image);
if (rotate_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
/*
Rotate the image.
*/
status=XShearImage(rotate_image,shear.x,width,height,bounds.x,(ssize_t)
(rotate_image->rows-height)/2,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=YShearImage(rotate_image,shear.y,bounds.width,height,(ssize_t)
(rotate_image->columns-bounds.width)/2,bounds.y,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=XShearImage(rotate_image,shear.x,bounds.width,bounds.height,(ssize_t)
(rotate_image->columns-bounds.width)/2,(ssize_t) (rotate_image->rows-
bounds.height)/2,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width,
(MagickRealType) height,MagickTrue,exception);
rotate_image->alpha_trait=image->alpha_trait;
rotate_image->compose=image->compose;
rotate_image->page.width=0;
rotate_image->page.height=0;
if (status == MagickFalse)
rotate_image=DestroyImage(rotate_image);
return(rotate_image);
}
|
likelihoods.h | /*!
* This file is part of GPBoost a C++ library for combining
* boosting with Gaussian process and mixed effects models
*
* Copyright (c) 2020 Fabio Sigrist. All rights reserved.
*
* Licensed under the Apache License Version 2.0. See LICENSE file in the project root for license information.
*/
#ifndef GPB_LIKELIHOODS_
#define GPB_LIKELIHOODS_
#define _USE_MATH_DEFINES // for M_SQRT1_2 and M_PI
#include <cmath>
#include <GPBoost/type_defs.h>
#include <GPBoost/sparse_matrix_utils.h>
#include <string>
#include <set>
#include <vector>
#include <LightGBM/utils/log.h>
using LightGBM::Log;
//Mathematical constants usually defined in cmath
#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795029
#endif
//sqrt(2)
#ifndef M_SQRT2
#define M_SQRT2 1.41421356237309504880
#endif
//1/sqrt(2)
#ifndef M_SQRT1_2
#define M_SQRT1_2 0.707106781186547524401
#endif
//2/sqrt(pi)
#ifndef M_2_SQRTPI
#define M_2_SQRTPI 1.12837916709551257390
#endif
//#include <chrono> // only needed for debugging
//#include <thread> // only needed for debugging
//std::chrono::steady_clock::time_point beginall = std::chrono::steady_clock::now();//DELETE
//std::chrono::steady_clock::time_point begin, end;//DELETE
//double el_time;
//end = std::chrono::steady_clock::now();//DELETE
//el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - beginall).count()) / 1000000.;// Only for debugging
//Log::REInfo("TOTAL TIME for mode calculation: %g", el_time);// Only for debugging
namespace GPBoost {
/*!
	* \brief This class implements the likelihoods for the Gaussian processes
* The template parameter <T_chol> can be either <chol_den_mat_t> or <chol_sp_mat_t>
*/
template<typename T_chol>//
class Likelihood {
public:
/*! \brief Constructor */
Likelihood();
/*!
* \brief Constructor
* \param likelihood Type of likelihood
*/
Likelihood(string_t type,
data_size_t num_data,
data_size_t num_re) {
string_t likelihood = ParseLikelihoodAlias(type);
if (SUPPORTED_LIKELIHOODS_.find(likelihood) == SUPPORTED_LIKELIHOODS_.end()) {
Log::REFatal("Likelihood of type '%s' is not supported.", likelihood.c_str());
}
likelihood_type_ = likelihood;
num_data_ = num_data;
num_re_ = num_re;
if (likelihood_type_ == "gamma") {
aux_pars_ = { 1. };//shape parameter, TODO: also estimate this parameter
}
}
/*!
	* \brief Initialize the mode vector mode_ (used in the Laplace approximation for non-Gaussian data)
*/
void InitializeModeAvec() {
mode_ = vec_t::Zero(num_re_);
mode_previous_value_ = vec_t::Zero(num_re_);
mode_initialized_ = true;
first_deriv_ll_ = vec_t(num_data_);
second_deriv_neg_ll_ = vec_t(num_data_);
}
/*!
	* \brief Reset the mode to its previous value. This is used if too large a step size was taken, resulting in an increase of the objective function;
	*  the new values (covariance parameters and linear coefficients) are then discarded, and consequently the mode should also be reset to its previous value.
*/
void ResetModeToPreviousValue() {
CHECK(mode_initialized_);
mode_ = mode_previous_value_;
}
/*! \brief Destructor */
~Likelihood() {
}
/*!
* \brief Returns the type of likelihood
*/
string_t GetLikelihood() const {
return(likelihood_type_);
}
/*!
* \brief Set the type of likelihood
* \param type Likelihood name
*/
void SetLikelihood(const string_t& type) {
string_t likelihood = ParseLikelihoodAlias(type);
if (SUPPORTED_LIKELIHOODS_.find(likelihood) == SUPPORTED_LIKELIHOODS_.end()) {
Log::REFatal("Likelihood of type '%s' is not supported.", likelihood.c_str());
}
likelihood_type_ = likelihood;
}
/*!
* \brief Returns the type of the response variable (label). Either "double" or "int"
*/
string_t label_type() const {
if (likelihood_type_ == "bernoulli_probit" || likelihood_type_ == "bernoulli_logit" ||
likelihood_type_ == "poisson") {
return("int");
}
else {
return("double");
}
}
/*!
* \brief Checks whether the response variables (labels) have the correct values
* \param y_data Response variable data
* \param num_data Number of data points
*/
template <typename T>//T can be double or float
void CheckY(const T* y_data, const data_size_t num_data) const {
if (likelihood_type_ == "bernoulli_probit" || likelihood_type_ == "bernoulli_logit") {
//#pragma omp parallel for schedule(static)//problematic with error message below...
for (data_size_t i = 0; i < num_data; ++i) {
if (fabs(y_data[i]) >= EPSILON_ && !AreSame<T>(y_data[i], 1.)) {
Log::REFatal("Response variable (label) data needs to be 0 or 1 for likelihood of type '%s'.", likelihood_type_.c_str());
}
}
}
else if (likelihood_type_ == "poisson") {
for (data_size_t i = 0; i < num_data; ++i) {
if (y_data[i] < 0) {
Log::REFatal("Found negative response variable. Response variable cannot be negative for likelihood of type '%s'.", likelihood_type_.c_str());
}
else {
double intpart;
if (std::modf(y_data[i], &intpart) != 0.0) {
Log::REFatal("Found non-integer response variable. Response variable can only be integer valued for likelihood of type '%s'.", likelihood_type_.c_str());
}
}
}
}
else if (likelihood_type_ == "gamma") {
for (data_size_t i = 0; i < num_data; ++i) {
if (y_data[i] < 0) {
Log::REFatal("Found negative response variable. Response variable cannot be negative for likelihood of type '%s'.", likelihood_type_.c_str());
}
}
}
}
/*!
* \brief Calculate normalizing constant for (log-)likelihood calculation
* \param y_data Response variable data
* \param num_data Number of data points
*/
template <typename T>//T can be double or int
void CalculateNormalizingConstant(const T* y_data, const data_size_t num_data) {
if (likelihood_type_ == "poisson") {
double log_normalizing_constant = 0.;
#pragma omp parallel for schedule(static) reduction(+:log_normalizing_constant)
for (data_size_t i = 0; i < num_data; ++i) {
if (y_data[i] > 1) {
double log_factorial = 0.;
for (int k = 2; k <= y_data[i]; ++k) {
log_factorial += std::log(k);
}
log_normalizing_constant += log_factorial;
}
}
log_normalizing_constant_ = log_normalizing_constant;
}
else if (likelihood_type_ == "gamma") {
// //Currently not used since aux_pars_[0]==1 and thus log_normalizing_constant_==0
// double log_normalizing_constant = 0.;
//#pragma omp parallel for schedule(static) reduction(+:log_normalizing_constant)
// for (data_size_t i = 0; i < num_data; ++i) {
// log_normalizing_constant += -(aux_pars_[0] - 1.) * std::log(y_data[i]) - aux_pars_[0] * std::log(aux_pars_[0]) + std::tgamma(aux_pars_[0]);
// }
// log_normalizing_constant_ = log_normalizing_constant;
log_normalizing_constant_ = 0. * y_data[0];//y_data[0] is just a trick to avoid compiler warnings complaining about unreferenced parameters...
}
normalizing_constant_has_been_calculated_ = true;
}
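		/*
		* Note (a side remark, not part of the original header): for the Poisson case
		* the constant above is sum_i log(y_i!); the inner loop could equivalently be
		* replaced by std::lgamma from <cmath>, since log(y!) = lgamma(y + 1):
		*   double log_factorial = std::lgamma((double)y_data[i] + 1.0);
		*/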
/*!
* \brief Evaluate the log-likelihood conditional on the latent variable (=location_par)
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param location_par Location parameter (random plus fixed effects)
* \param num_data Number of data points
*/
double LogLikelihood(const double* y_data, const int* y_data_int,
const double* location_par, const data_size_t num_data) {
if (!normalizing_constant_has_been_calculated_) {
Log::REFatal("The normalizing constant has not been calculated. Call 'CalculateNormalizingConstant' first.");
}
double ll = 0.;
if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static) reduction(+:ll)
for (data_size_t i = 0; i < num_data; ++i) {
if (y_data_int[i] == 0) {
ll += std::log(1 - normalCDF(location_par[i]));
}
else {
ll += std::log(normalCDF(location_par[i]));
}
}
}
else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static) reduction(+:ll)
for (data_size_t i = 0; i < num_data; ++i) {
ll += y_data_int[i] * location_par[i] - std::log(1 + std::exp(location_par[i]));
//Alternative version:
//if (y_data_int[i] == 0) {
// ll += std::log(1 - CondMeanLikelihood(location_par[i]));//CondMeanLikelihood = logistic function
//}
//else {
// ll += std::log(CondMeanLikelihood(location_par[i]));
//}
}
}
else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static) reduction(+:ll)
for (data_size_t i = 0; i < num_data; ++i) {
ll += y_data_int[i] * location_par[i] - std::exp(location_par[i]);
}
ll -= log_normalizing_constant_;
}
else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static) reduction(+:ll)
for (data_size_t i = 0; i < num_data; ++i) {
ll += -aux_pars_[0] * (location_par[i] + y_data[i] * std::exp(-location_par[i]));
}
ll -= log_normalizing_constant_;
}
return ll;
}
/*!
* \brief Calculate the first derivative of the log-likelihood with respect to the location parameter
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param location_par Location parameter (random plus fixed effects)
* \param num_data Number of data points
*/
void CalcFirstDerivLogLik(const double* y_data, const int* y_data_int,
const double* location_par, const data_size_t num_data) {
if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
if (y_data_int[i] == 0) {
first_deriv_ll_[i] = -normalPDF(location_par[i]) / (1 - normalCDF(location_par[i]));
}
else {
first_deriv_ll_[i] = normalPDF(location_par[i]) / normalCDF(location_par[i]);
}
}
}
else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
first_deriv_ll_[i] = y_data_int[i] - CondMeanLikelihood(location_par[i]);//CondMeanLikelihood = logistic(x)
}
}
else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
first_deriv_ll_[i] = y_data_int[i] - std::exp(location_par[i]);
}
}
else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
first_deriv_ll_[i] = aux_pars_[0] * (y_data[i] * std::exp(-location_par[i]) - 1.);
}
}
}
/*!
* \brief Calculate the second derivative of the negative (!) log-likelihood with respect to the location parameter
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param location_par Location parameter (random plus fixed effects)
* \param num_data Number of data points
*/
void CalcSecondDerivNegLogLik(const double* y_data, const int* y_data_int,
const double* location_par, const data_size_t num_data) {
if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
double dnorm = normalPDF(location_par[i]);
double pnorm = normalCDF(location_par[i]);
if (y_data_int[i] == 0) {
double dnorm_frac_one_min_pnorm = dnorm / (1. - pnorm);
second_deriv_neg_ll_[i] = -dnorm_frac_one_min_pnorm * (location_par[i] - dnorm_frac_one_min_pnorm);
}
else {
double dnorm_frac_pnorm = dnorm / pnorm;
second_deriv_neg_ll_[i] = dnorm_frac_pnorm * (location_par[i] + dnorm_frac_pnorm);
}
}
}
else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
double exp_loc_i = std::exp(location_par[i]);
second_deriv_neg_ll_[i] = exp_loc_i * std::pow(1. + exp_loc_i, -2);
}
}
else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
second_deriv_neg_ll_[i] = std::exp(location_par[i]);
}
}
else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
second_deriv_neg_ll_[i] = aux_pars_[0] * y_data[i] * std::exp(-location_par[i]);
}
}
}
/*!
* \brief Calculate the third derivative of the log-likelihood with respect to the location parameter
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param location_par Location parameter (random plus fixed effects)
* \param num_data Number of data points
* \param[out] third_deriv Third derivative of the log-likelihood with respect to the location parameter. Needs to be preallocated with size num_data
*/
void CalcThirdDerivLogLik(const double* y_data, const int* y_data_int,
const double* location_par, const data_size_t num_data, double* third_deriv) {
if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
double dnorm = normalPDF(location_par[i]);
double pnorm = normalCDF(location_par[i]);
if (y_data_int[i] == 0) {
double dnorm_frac_one_min_pnorm = dnorm / (1. - pnorm);
third_deriv[i] = dnorm_frac_one_min_pnorm * (1 - location_par[i] * location_par[i] +
dnorm_frac_one_min_pnorm * (3 * location_par[i] - 2 * dnorm_frac_one_min_pnorm));
}
else {
double dnorm_frac_pnorm = dnorm / pnorm;
third_deriv[i] = dnorm_frac_pnorm * (location_par[i] * location_par[i] - 1 +
dnorm_frac_pnorm * (3 * location_par[i] + 2 * dnorm_frac_pnorm));
}
}
}
else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
double exp_loc_i = std::exp(location_par[i]);
third_deriv[i] = -exp_loc_i * (1. - exp_loc_i) * std::pow(1 + exp_loc_i, -3);
}
}
else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
third_deriv[i] = -std::exp(location_par[i]);
}
}
else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
third_deriv[i] = aux_pars_[0] * y_data[i] * std::exp(-location_par[i]);
}
}
}
/*!
* \brief Calculate the mean of the likelihood conditional on the (predicted) latent variable
* Used for adaptive Gauss-Hermite quadrature for the prediction of the response variable
*/
inline double CondMeanLikelihood(const double value) const {
if (likelihood_type_ == "gaussian") {
return value;
}
else if (likelihood_type_ == "bernoulli_probit") {
return normalCDF(value);
}
else if (likelihood_type_ == "bernoulli_logit") {
return 1. / (1. + std::exp(-value));
}
else if (likelihood_type_ == "poisson") {
return std::exp(value);
}
else if (likelihood_type_ == "gamma") {
return std::exp(value);
}
else {
Log::REFatal("CondMeanLikelihood: Likelihood of type '%s' is not supported.", likelihood_type_.c_str());
return 0.;
}
}
/*!
* \brief Calculate the first derivative of the logarithm of the mean of the likelihood conditional on the (predicted) latent variable
* Used for adaptive Gauss-Hermite quadrature for the prediction of the response variable
*/
inline double FirstDerivLogCondMeanLikelihood(const double value) const {
if (likelihood_type_ == "bernoulli_logit") {
return 1. / (1. + std::exp(value));
}
else if (likelihood_type_ == "poisson") {
return 1.;
}
else if (likelihood_type_ == "gamma") {
return 1.;
}
else {
Log::REFatal("FirstDerivLogCondMeanLikelihood: Likelihood of type '%s' is not supported.", likelihood_type_.c_str());
return 0.;
}
}
/*!
* \brief Calculate the second derivative of the logarithm of the mean of the likelihood conditional on the (predicted) latent variable
* Used for adaptive Gauss-Hermite quadrature for the prediction of the response variable
*/
inline double SecondDerivLogCondMeanLikelihood(const double value) const {
if (likelihood_type_ == "bernoulli_logit") {
double exp_x = std::exp(value);
return -exp_x / ((1. + exp_x) * (1. + exp_x));
}
else if (likelihood_type_ == "poisson") {
return 0.;
}
else if (likelihood_type_ == "gamma") {
return 0.;
}
else {
Log::REFatal("SecondDerivLogCondMeanLikelihood: Likelihood of type '%s' is not supported.", likelihood_type_.c_str());
return 0.;
}
}
/*!
* \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximate marginal log-likelihood.
* Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt).
* In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id".
* This version is used for the Laplace approximation when dense matrices are used (e.g. GP models).
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param ZSigmaZt Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
*/
template <typename T_mat>//T_mat can be either den_mat_t or sp_mat_t
void FindModePostRandEffCalcMLLStable(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const std::shared_ptr<T_mat> ZSigmaZt,
double& approx_marginal_ll) {
// Initialize variables
if (!mode_initialized_) {
InitializeModeAvec();
}
else {
mode_previous_value_ = mode_;
}
bool no_fixed_effects = (fixed_effects == nullptr);
vec_t location_par;
// Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
if (no_fixed_effects) {
approx_marginal_ll = LogLikelihood(y_data, y_data_int, mode_.data(), num_data);
}
else {
location_par = vec_t(num_data);
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[i] + fixed_effects[i];
}
approx_marginal_ll = LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
}
double approx_marginal_ll_new;
vec_t rhs, v_aux;//auxiliary variables
sp_mat_t Wsqrt(num_data, num_data);//diagonal matrix with square root of negative second derivatives on the diagonal (sqrt of negative Hessian of log-likelihood)
Wsqrt.setIdentity();
T_mat Id(num_data, num_data);
Id.setIdentity();
T_mat Id_plus_Wsqrt_ZSigmaZt_Wsqrt;
// Start finding mode
int it;
for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
// Calculate first and second derivative of log-likelihood
if (no_fixed_effects) {
CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data);
CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data);
}
else {
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
}
// Calculate Cholesky factor of matrix B = Id + Wsqrt * Z*Sigma*Zt * Wsqrt
Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt();
Id_plus_Wsqrt_ZSigmaZt_Wsqrt = Id + Wsqrt * (*ZSigmaZt) * Wsqrt;
chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.compute(Id_plus_Wsqrt_ZSigmaZt_Wsqrt);
// Update mode and a_vec_
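// Newton step written via the matrix inversion lemma (analogous to Algorithm 3.1 in Rasmussen & Williams (2006)):
// mode_new = (Sigma^-1 + W)^-1 * rhs = Sigma * (rhs - Wsqrt * B^-1 * Wsqrt * Sigma * rhs) with rhs = W * mode + grad,
// so that only B = Id + Wsqrt * Sigma * Wsqrt needs to be factorized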
rhs = second_deriv_neg_ll_.asDiagonal() * mode_ + first_deriv_ll_;
v_aux = Wsqrt * (*ZSigmaZt) * rhs;
a_vec_ = rhs - Wsqrt * (chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.solve(v_aux));
mode_ = (*ZSigmaZt) * a_vec_;
// Calculate new objective function
if (no_fixed_effects) {
approx_marginal_ll_new = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, mode_.data(), num_data);
}
else {
// Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[i] + fixed_effects[i];
}
approx_marginal_ll_new = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
}
if (std::abs(approx_marginal_ll_new - approx_marginal_ll) / std::abs(approx_marginal_ll) < DELTA_REL_CONV_) {
approx_marginal_ll = approx_marginal_ll_new;
break;
}
else {
approx_marginal_ll = approx_marginal_ll_new;
}
}
if (it == MAXIT_MODE_NEWTON_) {
Log::REDebug("Algorithm for finding mode for Laplace approximation has not converged after the maximal number of iterations");
}
if (no_fixed_effects) {
CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data);
}
else {
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
}
Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt();
Id_plus_Wsqrt_ZSigmaZt_Wsqrt = Id + Wsqrt * (*ZSigmaZt) * Wsqrt;
chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.compute(Id_plus_Wsqrt_ZSigmaZt_Wsqrt);
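// subtract 0.5 * log|B| = sum(log(diag(L))), where B = L * L^T is the Cholesky factorization computed above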
approx_marginal_ll -= ((den_mat_t)chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL()).diagonal().array().log().sum();
mode_has_been_calculated_ = true;
////Only for debugging
//Log::REInfo("FindModePostRandEffCalcMLLStable");
//Log::REInfo("Number of iterations: %d", it);
//Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll);
//Log::REInfo("Mode");
//for (int i = 0; i < 10; ++i) {
// Log::REInfo("mode_[%d]: %g", i, mode_[i]);
//}
//Log::REInfo("a");
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("a[%d]: %g", i, a_vec_[i]);
//}
}//end FindModePostRandEffCalcMLLStable
/*!
* \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximate marginal log-likelihood.
* Calculations are done on the random effects (b) scale and not the "data scale" (Zb) using
* a numerically stable variant based on factorizing ("inverting") B = (Id + ZtWZsqrt * Sigma * ZtWZsqrt).
* This version is used for the Laplace approximation when there is only one Gaussian process and
* there are many repeated observations at the same locations, i.e., the dimension of the random effects b is much smaller than that of Zb
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param Sigma Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
*/
template <typename T_mat>//T_mat can be either den_mat_t or sp_mat_t
void FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const std::shared_ptr<T_mat> Sigma,
const data_size_t * const random_effects_indices_of_data,
double& approx_marginal_ll) {
// Initialize variables
if (!mode_initialized_) {
InitializeModeAvec();
}
else {
mode_previous_value_ = mode_;
}
vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects
if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]];
}
}
else {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
}
}
// Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
approx_marginal_ll = LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
double approx_marginal_ll_new;
vec_t diag_sqrt_ZtWZ(num_re_);//diagonal of Zt*W*Z (later replaced in-place by its square root)
T_mat Id(num_re_, num_re_);
Id.setIdentity();
T_mat Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt;
vec_t rhs, v_aux;
int it;
for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
// Calculate first and second derivative of log-likelihood
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
// Calculate right hand side for mode update
diag_sqrt_ZtWZ.setZero();
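// scatter-add pattern: every thread accumulates into a private vector and the results are merged in a critical
// section; this avoids race conditions when multiple data points belong to the same random effect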
#pragma omp parallel
{
vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_sqrt_ZtWZ[i_re] += diag_sqrt_ZtWZ_private[i_re];
}
}//end omp critical
}//end omp parallel
////Old non-parallel version
//for (data_size_t i = 0; i < num_data; ++i) {
// diag_sqrt_ZtWZ[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
//}
rhs = (diag_sqrt_ZtWZ.array() * mode_.array()).matrix();//rhs = ZtWZ * mode_ (the Zt * first_deriv_ll_ part of the right hand side for updating the mode is added below)
#pragma omp parallel
{
vec_t rhs_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
rhs_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
rhs[i_re] += rhs_private[i_re];
}
}//end omp critical
}//end omp parallel
////Old non-parallel version
//for (data_size_t i = 0; i < num_data; ++i) {
// rhs[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
//}
// Calculate Cholesky factor of matrix B = Id + ZtWZsqrt * Sigma * ZtWZsqrt
diag_sqrt_ZtWZ.array() = diag_sqrt_ZtWZ.array().sqrt();
Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt = Id + diag_sqrt_ZtWZ.asDiagonal() * (*Sigma) * diag_sqrt_ZtWZ.asDiagonal();
chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.compute(Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt);
// Update mode and a_vec_
v_aux = diag_sqrt_ZtWZ.asDiagonal() * (*Sigma) * rhs;
a_vec_ = rhs - diag_sqrt_ZtWZ.asDiagonal() * (chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.solve(v_aux));
mode_ = (*Sigma) * a_vec_;
// Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]];
}
}
else {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
}
}
// Calculate new objective function
approx_marginal_ll_new = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
if (std::abs(approx_marginal_ll_new - approx_marginal_ll) / std::abs(approx_marginal_ll) < DELTA_REL_CONV_) {
approx_marginal_ll = approx_marginal_ll_new;
break;
}
else {
approx_marginal_ll = approx_marginal_ll_new;
}
}
if (it == MAXIT_MODE_NEWTON_) {
Log::REDebug("Algorithm for finding mode for Laplace approximation has not converged after the maximal number of iterations");
}
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
diag_sqrt_ZtWZ.setZero();
#pragma omp parallel
{
vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_sqrt_ZtWZ[i_re] += diag_sqrt_ZtWZ_private[i_re];
}
}//end omp critical
}//end omp parallel
diag_sqrt_ZtWZ.array() = diag_sqrt_ZtWZ.array().sqrt();
Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt = Id + diag_sqrt_ZtWZ.asDiagonal() * (*Sigma) * diag_sqrt_ZtWZ.asDiagonal();
chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.compute(Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt);
approx_marginal_ll -= ((den_mat_t)chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL()).diagonal().array().log().sum();
mode_has_been_calculated_ = true;
////Only for debugging
//Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale");
//Log::REInfo("Number of iterations: %d", it);
//Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll);
//Log::REInfo("Mode");
//for (int i = 0; i < 10; ++i) {
// Log::REInfo("mode_[%d]: %g", i, mode_[i]);
//}
//Log::REInfo("a");
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("a[%d]: %g", i, a_vec_[i]);
//}
}//end FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale
/*!
* \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximate marginal log-likelihood.
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* NOTE: IT IS ASSUMED THAT SIGMA IS A DIAGONAL MATRIX
* This version is used for the Laplace approximation when there are only grouped random effects.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param SigmaI Inverse covariance matrix of latent random effect. Currently, this needs to be a diagonal matrix
* \param Zt Transpose Z^T of random effect design matrix that relates latent random effects to observations/likelihoods
* \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
*/
void FindModePostRandEffCalcMLLGroupedRE(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const sp_mat_t& SigmaI,
const sp_mat_t& Zt,
double& approx_marginal_ll) {
// Initialize variables
if (!mode_initialized_) {
InitializeModeAvec();
}
else {
mode_previous_value_ = mode_;
}
sp_mat_t Z = Zt.transpose();
vec_t location_par = Z * mode_;//location parameter = mode of random effects + fixed effects
if (fixed_effects != nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] += fixed_effects[i];
}
}
// Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
approx_marginal_ll = LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
double approx_marginal_ll_new;
sp_mat_t SigmaI_plus_ZtWZ;
vec_t rhs;
// Start finding mode
int it;
for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
// Calculate first and second derivative of log-likelihood
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
// Calculate Cholesky factor and update mode
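// full Newton step on the objective ll(Z * b) - 0.5 * b^T * SigmaI * b:
// gradient = Zt * first_deriv_ll_ - SigmaI * mode_, negative Hessian = SigmaI + Zt * W * Z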
rhs = Zt * first_deriv_ll_ - SigmaI * mode_;//right hand side for updating mode
SigmaI_plus_ZtWZ = SigmaI + Zt * second_deriv_neg_ll_.asDiagonal() * Z;
chol_fact_SigmaI_plus_ZtWZ_.compute(SigmaI_plus_ZtWZ);
mode_ += chol_fact_SigmaI_plus_ZtWZ_.solve(rhs);
// Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
location_par = Z * mode_;
if (fixed_effects != nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] += fixed_effects[i];
}
}
// Calculate new objective function
approx_marginal_ll_new = -0.5 * (mode_.dot(SigmaI * mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
if (std::abs(approx_marginal_ll_new - approx_marginal_ll) / std::abs(approx_marginal_ll) < DELTA_REL_CONV_) {
approx_marginal_ll = approx_marginal_ll_new;
break;
}
else {
approx_marginal_ll = approx_marginal_ll_new;
}
}//end mode finding algorithm
if (it == MAXIT_MODE_NEWTON_) {
Log::REDebug("Algorithm for finding mode for Laplace approximation has not converged after the maximal number of iterations");
}
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
SigmaI_plus_ZtWZ = SigmaI + Zt * second_deriv_neg_ll_.asDiagonal() * Z;
chol_fact_SigmaI_plus_ZtWZ_.compute(SigmaI_plus_ZtWZ);
approx_marginal_ll += -((den_mat_t)chol_fact_SigmaI_plus_ZtWZ_.matrixL()).diagonal().array().log().sum() + 0.5 * SigmaI.diagonal().array().log().sum();
mode_has_been_calculated_ = true;
////Only for debugging
//Log::REInfo("FindModePostRandEffCalcMLLGroupedRE");
//Log::REInfo("Number of iterations: %d", it);
//Log::REInfo("Mode");
//for (int i = 0; i < 10; ++i) {
// Log::REInfo("mode_[%d]: %g", i, mode_[i]);
//}
//Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll);
//double approx_marginal_ll_1 = -0.5 * (mode_.dot(SigmaI * mode_));
//double approx_marginal_ll_2 = LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
//double approx_marginal_ll_3 = 0.5 * diag_SigmaI_plus_ZtWZ_.array().log().sum() - 0.5 * SigmaI.diagonal().array().log().sum();
//Log::REInfo("approx_marginal_ll_1: %g", approx_marginal_ll_1);
//Log::REInfo("approx_marginal_ll_2: %g", approx_marginal_ll_2);
//Log::REInfo("approx_marginal_ll_3: %g", approx_marginal_ll_3);
//std::this_thread::sleep_for(std::chrono::milliseconds(200));
}//end FindModePostRandEffCalcMLLGroupedRE
/*!
* \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximate marginal log-likelihood.
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* This version is used for the Laplace approximation when there are only grouped random effects with only one grouping variable.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param sigma2 Variance of random effects
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
*/
void FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const double sigma2,
const data_size_t* const random_effects_indices_of_data,
double& approx_marginal_ll) {
// Initialize variables
if (!mode_initialized_) {
InitializeModeAvec();
}
else {
mode_previous_value_ = mode_;
}
vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects
if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]];
}
}
else {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
}
}
// Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
approx_marginal_ll = LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
double approx_marginal_ll_new;
vec_t rhs;
diag_SigmaI_plus_ZtWZ_ = vec_t(num_re_);
// Start finding mode
int it;
for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
// Calculate first and second derivative of log-likelihood
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
// Calculate rhs for mode update
rhs = - mode_ / sigma2;//right hand side for updating mode
#pragma omp parallel
{
vec_t rhs_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
rhs_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
rhs[i_re] += rhs_private[i_re];
}
}//end omp critical
}//end omp parallel
// Update mode
diag_SigmaI_plus_ZtWZ_.setZero();
#pragma omp parallel
{
vec_t diag_SigmaI_plus_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_SigmaI_plus_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_SigmaI_plus_ZtWZ_[i_re] += diag_SigmaI_plus_ZtWZ_private[i_re];
}
}//end omp critical
}//end omp parallel
diag_SigmaI_plus_ZtWZ_.array() += 1. / sigma2;
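// with a single grouping variable, Zt*W*Z is diagonal and Sigma^-1 = Id / sigma2, so the Newton solve reduces to an elementwise division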
mode_ += (rhs.array() / diag_SigmaI_plus_ZtWZ_.array()).matrix();
// Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]];
}
}
else {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
}
}
// Calculate new objective function
approx_marginal_ll_new = -0.5 / sigma2 * (mode_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
if (std::abs(approx_marginal_ll_new - approx_marginal_ll) / std::abs(approx_marginal_ll) < DELTA_REL_CONV_) {
approx_marginal_ll = approx_marginal_ll_new;
break;
}
else {
approx_marginal_ll = approx_marginal_ll_new;
}
}//end mode finding algorithm
if (it == MAXIT_MODE_NEWTON_) {
Log::REDebug("Algorithm for finding mode for Laplace approximation has not converged after the maximal number of iterations");
}
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
diag_SigmaI_plus_ZtWZ_.setZero();
#pragma omp parallel
{
vec_t diag_SigmaI_plus_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_SigmaI_plus_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_SigmaI_plus_ZtWZ_[i_re] += diag_SigmaI_plus_ZtWZ_private[i_re];
}
}//end omp critical
}//end omp parallel
diag_SigmaI_plus_ZtWZ_.array() += 1. / sigma2;
approx_marginal_ll -= 0.5 * diag_SigmaI_plus_ZtWZ_.array().log().sum() + 0.5 * num_re_ * std::log(sigma2);
mode_has_been_calculated_ = true;
////Only for debugging
//Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale");
//Log::REInfo("Number of iterations: %d", it);
//Log::REInfo("Mode");
//for (int i = 0; i < 10; ++i) {
// Log::REInfo("mode_[%d]: %g", i, mode_[i]);
//}
//Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll);
//std::this_thread::sleep_for(std::chrono::milliseconds(200));
}//end FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale
/*!
* \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximate marginal log-likelihood.
* Calculations are done by factorizing ("inverting") (Sigma^-1 + W) where it is assumed that an approximate Cholesky factor
* of Sigma^-1 has previously been calculated using a Vecchia approximation.
* This version is used for the Laplace approximation when there are only GP random effects and the Vecchia approximation is used.
* Caveat: Sigma^-1 + W need not be very sparse
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param B Matrix B in Vecchia approximation Sigma^-1 = B^T D^-1 B ("=" Cholesky factor)
* \param D_inv Diagonal matrix D^-1 in Vecchia approximation Sigma^-1 = B^T D^-1 B
* \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
*/
void FindModePostRandEffCalcMLLVecchia(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const sp_mat_t& B,
const sp_mat_t& D_inv,
double& approx_marginal_ll) {
// Initialize variables
if (!mode_initialized_) {
InitializeModeAvec();
}
else {
mode_previous_value_ = mode_;
}
bool no_fixed_effects = (fixed_effects == nullptr);
sp_mat_t SigmaI = B.transpose() * D_inv * B;
vec_t location_par;//location parameter = mode of random effects + fixed effects
// Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
if (no_fixed_effects) {
approx_marginal_ll = LogLikelihood(y_data, y_data_int, mode_.data(), num_data);
}
else {
location_par = vec_t(num_data);
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[i] + fixed_effects[i];
}
approx_marginal_ll = LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
}
double approx_marginal_ll_new;
sp_mat_t SigmaI_plus_W;
vec_t rhs, B_mode;
// Start finding mode
int it;
for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
// Calculate first and second derivative of log-likelihood
if (no_fixed_effects) {
CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data);
CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data);
}
else {
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
}
// Calculate Cholesky factor and update mode
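// Newton step mode_new = (Sigma^-1 + W)^-1 * (W * mode + grad), solved directly with a sparse Cholesky
// factorization of Sigma^-1 + W, where Sigma^-1 = B^T * D^-1 * B comes from the Vecchia approximation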
rhs = second_deriv_neg_ll_.asDiagonal() * mode_ + first_deriv_ll_;//right hand side for updating mode
SigmaI_plus_W = SigmaI;
SigmaI_plus_W.diagonal().array() += second_deriv_neg_ll_.array();
//Log::REInfo("Number non zeros = %d", (int)SigmaI_plus_W.nonZeros());//only for debugging, can be deleted
chol_fact_SigmaI_plus_ZtWZ_.compute(SigmaI_plus_W);//This is usually the bottleneck
mode_ = chol_fact_SigmaI_plus_ZtWZ_.solve(rhs);
// Calculate new objective function
B_mode = B * mode_;
if (no_fixed_effects) {
approx_marginal_ll_new = -0.5 * (B_mode.dot(D_inv * B_mode)) + LogLikelihood(y_data, y_data_int, mode_.data(), num_data);
}
else {
// Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[i] + fixed_effects[i];
}
approx_marginal_ll_new = -0.5 * (B_mode.dot(D_inv * B_mode)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
}
if (std::abs(approx_marginal_ll_new - approx_marginal_ll) / std::abs(approx_marginal_ll) < DELTA_REL_CONV_) {
approx_marginal_ll = approx_marginal_ll_new;
break;
}
else {
approx_marginal_ll = approx_marginal_ll_new;
}
}
if (it == MAXIT_MODE_NEWTON_) {
Log::REDebug("Algorithm for finding mode for Laplace approximation has not converged after the maximal number of iterations");
}
if (no_fixed_effects) {
CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data);
}
else {
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
}
SigmaI_plus_W = SigmaI;
SigmaI_plus_W.diagonal().array() += second_deriv_neg_ll_.array();
chol_fact_SigmaI_plus_ZtWZ_.compute(SigmaI_plus_W);
approx_marginal_ll += -((den_mat_t)chol_fact_SigmaI_plus_ZtWZ_.matrixL()).diagonal().array().log().sum() + 0.5 * D_inv.diagonal().array().log().sum();
mode_has_been_calculated_ = true;
////Only for debugging
//Log::REInfo("Number of iterations: %d", it);
//Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll);
//Log::REInfo("Mode");
//for (int i = 0; i < 10; ++i) {
// Log::REInfo("mode_[%d]: %g", i, mode_[i]);
//}
//std::this_thread::sleep_for(std::chrono::milliseconds(200));
}//end FindModePostRandEffCalcMLLVecchia
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt).
* In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id".
* This version is used for the Laplace approximation when dense matrices are used (e.g. GP models).
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param ZSigmaZt Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param re_comps_cluster_i Vector with different random effects components. We pass the component pointers in order to avoid holding a large collection of gradient covariance matrices in memory//TODO: better way than passing this? (relying on all gradients in a vector can lead to large memory consumption)
* \param calc_cov_grad If true, the gradient wrt the covariance parameters is calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F is calculated
* \param[out] cov_grad Gradient of approximate marginal log-likelihood wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient of approximate marginal log-likelihood wrt fixed effects F (note: this is passed as an Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
template <typename T_mat>//T_mat can be either den_mat_t or sp_mat_t
void CalcGradNegMargLikelihoodLAApproxStable(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const std::shared_ptr<T_mat> ZSigmaZt,
const std::vector<std::shared_ptr<RECompBase<T_mat>>>& re_comps_cluster_i,
bool calc_cov_grad,
bool calc_F_grad,
double* cov_grad,
vec_t& fixed_effect_grad,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLStable<T_mat>(y_data, y_data_int, fixed_effects, num_data, ZSigmaZt, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
// Initialize variables
bool no_fixed_effects = (fixed_effects == nullptr);
vec_t location_par;//location parameter = mode of random effects + fixed effects
sp_mat_t Wsqrt(num_data, num_data);//diagonal matrix with square root of negative second derivatives on the diagonal (sqrt of negative Hessian of log-likelihood)
Wsqrt.setIdentity();
vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood
if (no_fixed_effects) {
CalcThirdDerivLogLik(y_data, y_data_int, mode_.data(), num_data, third_deriv.data());
}
else {
location_par = vec_t(num_data);
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[i] + fixed_effects[i];
}
CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data());
}
Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt();
T_mat L = chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL();
T_mat L_inv_Wsqrt, WI_plus_Sigma_inv, C;
CalcLInvH(L, Wsqrt, L_inv_Wsqrt, true);//L_inv_Wsqrt = L\Wsqrt
C = L_inv_Wsqrt * (*ZSigmaZt);
// calculate gradient wrt covariance parameters
if (calc_cov_grad) {
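// total derivative via the chain rule: d psi / d theta = (explicit part, holding the mode fixed)
// + (d psi / d mode)^T * (d mode / d theta), computed below as explicit_derivative + d_mll_d_mode.dot(d_mode_d_par)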
//CalcLInvH(L, L_inv_Wsqrt, WI_plus_Sigma_inv, false);//WI_plus_Sigma_inv = Wsqrt * L^T\(L\Wsqrt) = (W^-1 + Sigma)^-1
//WI_plus_Sigma_inv = Wsqrt * WI_plus_Sigma_inv;
WI_plus_Sigma_inv = L_inv_Wsqrt.transpose() * L_inv_Wsqrt;//WI_plus_Sigma_inv = Wsqrt * L^T\(L\Wsqrt) = (W^-1 + Sigma)^-1
// calculate gradient of approx. marginal log-likelihood wrt the mode
// note: use (i) (Sigma^-1 + W)^-1 = Sigma - Sigma*(W^-1 + Sigma)^-1*Sigma = ZSigmaZt - C^T*C and (ii) "Z=Id"
vec_t d_mll_d_mode = (-0.5 * ((*ZSigmaZt).diagonal() - ((T_mat)(C.transpose() * C)).diagonal()).array() * third_deriv.array()).matrix();
vec_t d_mode_d_par;//derivative of mode wrt to a covariance parameter
vec_t v_aux;//auxiliary variable for calculating d_mode_d_par
int par_count = 0;
double explicit_derivative;
for (int j = 0; j < (int)re_comps_cluster_i.size(); ++j) {
for (int ipar = 0; ipar < re_comps_cluster_i[j]->NumCovPar(); ++ipar) {
std::shared_ptr<T_mat> SigmaDeriv = re_comps_cluster_i[j]->GetZSigmaZtGrad(ipar, true, 1.);
// calculate explicit derivative of approx. marginal log-likelihood
explicit_derivative = -0.5 * (double)(a_vec_.transpose() * (*SigmaDeriv) * a_vec_) + 0.5 * (WI_plus_Sigma_inv.cwiseProduct(*SigmaDeriv)).sum();
// calculate implicit derivative (through mode) of approx. marginal log-likelihood
v_aux = (*SigmaDeriv) * first_deriv_ll_;
d_mode_d_par = (v_aux.array() - ((*ZSigmaZt) * WI_plus_Sigma_inv * v_aux).array()).matrix();
cov_grad[par_count] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
par_count++;
}
}
////Only for debugging
//Log::REInfo("explicit_derivative: %g", explicit_derivative);
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
//}
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
//}
//Log::REInfo("cov_grad");
//for (int i = 0; i < par_count; ++i) {
// Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]);
//}
}//end calc_cov_grad
// calculate gradient wrt fixed effects
if (calc_F_grad) {
T_mat ZSigmaZtI_plus_W_inv = (*ZSigmaZt) - (T_mat)(C.transpose() * C);// = (ZSigmaZt^-1 + W) ^ -1
// calculate gradient of approx. marginal likelihood wrt the mode
vec_t d_mll_d_mode = (-0.5 * ZSigmaZtI_plus_W_inv.diagonal().array() * third_deriv.array()).matrix();//Note: d_mll_d_mode = d_detmll_d_F
//T_mat ZSigmaZtI_plus_W_inv_W = ZSigmaZtI_plus_W_inv * second_deriv_neg_ll_.asDiagonal();//DELETE
//fixed_effect_grad = -first_deriv_ll_ + d_mll_d_mode - d_mll_d_mode.transpose() * ZSigmaZtI_plus_W_inv_W;//DELETE
vec_t d_mll_d_modeT_SigmaI_plus_ZtWZ_inv_Zt_W = d_mll_d_mode.transpose() * ZSigmaZtI_plus_W_inv * second_deriv_neg_ll_.asDiagonal();
fixed_effect_grad = -first_deriv_ll_ + d_mll_d_mode - d_mll_d_modeT_SigmaI_plus_ZtWZ_inv_Zt_W;
}//end calc_F_grad
}//end CalcGradNegMargLikelihoodLAApproxStable
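////Example call sequence (sketch only, not part of the library; 'lik', 'y_int', 'fixed_effects', 'ZSigmaZt',
////'re_comps', 'cov_grad', and 'F_grad' are hypothetical names) for one evaluation of the Laplace-approximated
////marginal log-likelihood and its gradients with an integer-valued response:
//// lik.CalculateNormalizingConstant(y_int, num_data);
//// double mll;
//// lik.FindModePostRandEffCalcMLLStable<den_mat_t>(nullptr, y_int, fixed_effects, num_data, ZSigmaZt, mll);
//// lik.CalcGradNegMargLikelihoodLAApproxStable<den_mat_t>(nullptr, y_int, fixed_effects, num_data,
////  ZSigmaZt, re_comps, true, true, cov_grad.data(), F_grad);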
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done on the random effects (b) scale and not the "data scale" (Zb) using
* a numerically stable variant based on factorizing ("inverting") B = (Id + ZtWZsqrt * Sigma * ZtWZsqrt).
* This version is used for the Laplace approximation when there is only one Gaussian process and
* there are many repeated observations at the same locations, i.e., the dimension of the random effects b is much smaller than that of Zb
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param Sigma Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param re_comps_cluster_i Vector with different random effects components. We pass the component pointers in order to avoid holding a large collection of gradient covariance matrices in memory//TODO: better way than passing this? (relying on all gradients in a vector can lead to large memory consumption)
* \param calc_cov_grad If true, the gradient wrt the covariance parameters is calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F is calculated
* \param[out] cov_grad Gradient of approximate marginal log-likelihood wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient of approximate marginal log-likelihood wrt fixed effects F (note: this is passed as an Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
template <typename T_mat>//T_mat can be either den_mat_t or sp_mat_t
void CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const std::shared_ptr<T_mat> Sigma,
const data_size_t* const random_effects_indices_of_data,
const std::vector<std::shared_ptr<RECompBase<T_mat>>> & re_comps_cluster_i,
bool calc_cov_grad,
bool calc_F_grad,
double* cov_grad,
vec_t & fixed_effect_grad,
bool calc_mode = false) {
CHECK(re_comps_cluster_i.size() == 1);
if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale<T_mat>(y_data, y_data_int, fixed_effects, num_data,
Sigma, random_effects_indices_of_data, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
// Initialize variables
vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects
if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]];
}
}
else {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
}
}
// Matrix ZtWZsqrt
vec_t diag_ZtWZ = vec_t::Zero(num_re_);
#pragma omp parallel
{
vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_ZtWZ.array()[i_re] += diag_sqrt_ZtWZ_private[i_re];
}
}//end omp critical
}//end omp parallel
sp_mat_t ZtWZsqrt(num_re_, num_re_);//diagonal matrix with square root of diagonal of ZtWZ
ZtWZsqrt.setIdentity();
ZtWZsqrt.diagonal().array() = diag_ZtWZ.array().sqrt();
vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood
CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data());
vec_t diag_ZtThirdDerivZ(num_re_);//diagonal of Zt * diag(third_deriv) * Z
diag_ZtThirdDerivZ.setZero();
#pragma omp parallel
{
vec_t diag_ZtThirdDerivZ_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_ZtThirdDerivZ_private[random_effects_indices_of_data[i]] += third_deriv[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_ZtThirdDerivZ[i_re] += diag_ZtThirdDerivZ_private[i_re];
}
}//end omp critical
}//end omp parallel
T_mat L = chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL();
T_mat L_inv_ZtWZsqrt, ZtWZI_Sigma_inv, C;
CalcLInvH(L, ZtWZsqrt, L_inv_ZtWZsqrt, true);//L_inv_ZtWZsqrt = L\ZtWZsqrt
C = L_inv_ZtWZsqrt * (*Sigma);
// calculate gradient wrt covariance parameters
if (calc_cov_grad) {
vec_t ZtFirstDeriv(num_re_);//Zt * first_deriv_ll_
ZtFirstDeriv.setZero();
#pragma omp parallel
{
vec_t ZtFirstDeriv_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
ZtFirstDeriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
ZtFirstDeriv[i_re] += ZtFirstDeriv_private[i_re];
}
}//end omp critical
}//end omp parallel
ZtWZI_Sigma_inv = L_inv_ZtWZsqrt.transpose() * L_inv_ZtWZsqrt;//ZtWZI_Sigma_inv = ZtWZsqrt * L^T\(L\ZtWZsqrt) = ((ZtWZ)^-1 + Sigma)^-1
// calculate gradient of approx. marginal log-likelihood wrt the mode
// note: use (i) (Sigma^-1 + W)^-1 = Sigma - Sigma*(W^-1 + Sigma)^-1*Sigma = ZSigmaZt - C^T*C
vec_t d_mll_d_mode = (-0.5 * ((*Sigma).diagonal() - ((T_mat)(C.transpose() * C)).diagonal()).array() * diag_ZtThirdDerivZ.array()).matrix();
vec_t d_mode_d_par;//derivative of mode wrt to a covariance parameter
vec_t v_aux;//auxiliary variable for calculating d_mode_d_par
int par_count = 0;
double explicit_derivative;
for (int j = 0; j < (int)re_comps_cluster_i.size(); ++j) {
for (int ipar = 0; ipar < re_comps_cluster_i[j]->NumCovPar(); ++ipar) {
std::shared_ptr<T_mat> SigmaDeriv = re_comps_cluster_i[j]->GetZSigmaZtGrad(ipar, true, 1.);
// calculate explicit derivative of approx. marginal log-likelihood
explicit_derivative = -0.5 * (double)(a_vec_.transpose() * (*SigmaDeriv) * a_vec_) +
0.5 * (ZtWZI_Sigma_inv.cwiseProduct(*SigmaDeriv)).sum();
// calculate implicit derivative (through mode) of approx. marginal log-likelihood
v_aux = (*SigmaDeriv) * ZtFirstDeriv;
d_mode_d_par = (v_aux.array() - ((*Sigma) * ZtWZI_Sigma_inv * v_aux).array()).matrix();
cov_grad[par_count] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
par_count++;
}
}
////Only for debugging
//Log::REInfo("explicit_derivative: %g", explicit_derivative);
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
//}
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
//}
//Log::REInfo("cov_grad");
//for (int i = 0; i < par_count; ++i) {
// Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]);
//}
}//end calc_cov_grad
// calculate gradient wrt fixed effects
if (calc_F_grad) {
T_mat SigmaI_plus_ZtWZ_inv = (*Sigma) - (T_mat)(C.transpose() * C);// = (Sigma^-1 + ZtWZ) ^ -1
// calculate gradient of approx. marginal likelihood wrt the mode
vec_t d_mll_d_mode = (-0.5 * SigmaI_plus_ZtWZ_inv.diagonal().array() * diag_ZtThirdDerivZ.array()).matrix();
fixed_effect_grad = -first_deriv_ll_;
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
fixed_effect_grad[i] += -0.5 * third_deriv[i] * SigmaI_plus_ZtWZ_inv.coeff(random_effects_indices_of_data[i], random_effects_indices_of_data[i]) -
second_deriv_neg_ll_[i] * (d_mll_d_mode.cwiseProduct(SigmaI_plus_ZtWZ_inv.col(random_effects_indices_of_data[i]))).sum();
}
}//end calc_F_grad
}//end CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* NOTE: IT IS ASSUMED THAT SIGMA IS A DIAGONAL MATRIX
* This version is used for the Laplace approximation when there are only grouped random effects.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param SigmaI Inverse covariance matrix of latent random effect. Currently, this needs to be a diagonal matrix
* \param Zt Transpose Z^T of random effect design matrix that relates latent random effects to observations/likelihoods
* \param cum_num_rand_eff_cluster_i Cumulative number of random effects per random effects component (entries j and j+1 delimit the random effects of component j)
* \param calc_cov_grad If true, the gradient wrt the covariance parameters is calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F is calculated
* \param[out] cov_grad Gradient wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient wrt fixed effects F (note: this is passed as an Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
template <typename T_mat>//T_mat can be either den_mat_t or sp_mat_t
void CalcGradNegMargLikelihoodLAApproxGroupedRE(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const sp_mat_t& SigmaI,
const sp_mat_t& Zt,
std::vector<data_size_t> cum_num_rand_eff_cluster_i,
bool calc_cov_grad,
bool calc_F_grad,
double* cov_grad,
vec_t& fixed_effect_grad,
bool calc_mode = false) {
int num_REs = (int)SigmaI.cols();//number of random effect realizations
int num_comps = (int)cum_num_rand_eff_cluster_i.size() - 1;//number of different random effect components
if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + W at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLGroupedRE(y_data, y_data_int, fixed_effects, num_data, SigmaI, Zt, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
// Initialize variables
sp_mat_t Z = Zt.transpose();
vec_t location_par = Z * mode_;//location parameter = mode of random effects + fixed effects
if (fixed_effects != nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] += fixed_effects[i];
}
}
vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood
CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data());
// Calculate (Sigma^-1 + Zt*W*Z)^-1
sp_mat_t Id(num_REs, num_REs);
Id.setIdentity();
sp_mat_t SigmaI_plus_ZtWZ_inv = chol_fact_SigmaI_plus_ZtWZ_.solve(Id);
// calculate gradient of approx. marginal likelihood wrt the mode
//Note: the calculation of d_mll_d_mode is the bottleneck of this function (corresponding lines below are indicated with * and, in particular, **)
vec_t d_mll_d_mode(num_REs);
sp_mat_t Zt_third_deriv = Zt * third_deriv.asDiagonal();//every column of Z multiplied elementwise by third_deriv
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_REs; ++i) {
vec_t diag_d_W_d_mode_i = Zt_third_deriv.row(i);//*can be slow
//calculate Z^T * diag(diag_d_W_d_mode_i) * Z = Z^T * diag(Z.col(i) * third_deriv) * Z
sp_mat_t Zt_d_W_d_mode_i_Z = (Zt * diag_d_W_d_mode_i.asDiagonal() * Z).pruned();//**can be very slow. Note that this is also slow when the middle diagonal matrix is a pruned sparse matrix
////Variant 2: slower
//sp_mat_t Zt_third_deriv_diag = sp_mat_t(((vec_t)Zt_third_deriv.row(i)).asDiagonal());
//sp_mat_t Zt_d_W_d_mode_i_Z = Zt * Zt_third_deriv_diag * Z;//= Z^T * diag(diag_d_W_d_mode_i) * Z = Z^T * diag(Z.col(i) * third_deriv) * Z
////Variant 3: slower
//vec_t Z_i = Z.col(i);// column number i of Z
//vec_t diag_d_W_d_mode_i = (Z_i.array() * third_deriv.array()).matrix();//diagonal of derivative of matrix W wrt random effect number i
//sp_mat_t Zt_d_W_d_mode_i_Z = Zt * diag_d_W_d_mode_i.asDiagonal() * Z;//= Z^T * diag(diag_d_W_d_mode_i) * Z
d_mll_d_mode[i] = -0.5 * (Zt_d_W_d_mode_i_Z.cwiseProduct(SigmaI_plus_ZtWZ_inv)).sum();
}
// calculate gradient wrt covariance parameters
if (calc_cov_grad) {
sp_mat_t ZtWZ = Zt * second_deriv_neg_ll_.asDiagonal() * Z;
vec_t d_mode_d_par;//derivative of mode wrt to a covariance parameter
vec_t v_aux;//auxiliary variable for caclulating d_mode_d_par
vec_t SigmaI_mode = SigmaI * mode_;
double explicit_derivative;
sp_mat_t I_j(num_REs, num_REs);//Diagonal matrix with 1 on the diagonal for all random effects of component j and 0's otherwise
sp_mat_t I_j_ZtWZ;
for (int j = 0; j < num_comps; ++j) {
// calculate explicit derivative of approx. marginal log-likelihood
std::vector<Triplet_t> triplets;//for constructing I_j
triplets.reserve(cum_num_rand_eff_cluster_i[j + 1] - cum_num_rand_eff_cluster_i[j]);
explicit_derivative = 0.;
for (int i = cum_num_rand_eff_cluster_i[j]; i < cum_num_rand_eff_cluster_i[j + 1]; ++i) {
triplets.emplace_back(i, i, 1.);
explicit_derivative += SigmaI_mode[i] * mode_[i];
}
// Alternative version using parallelization (not faster)
//#pragma omp parallel
// {
// std::vector<Triplet_t> triplets_private;
// //triplets_private.reserve(cum_num_rand_eff_cluster_i[num_comps]);
//#pragma omp for nowait reduction(+:explicit_derivative)
// for (int i = cum_num_rand_eff_cluster_i[j]; i < cum_num_rand_eff_cluster_i[j + 1]; ++i) {
// triplets_private.emplace_back(i, i, 1.);
// explicit_derivative += SigmaI_mode[i] * mode_[i];
// }
//#pragma omp critical
// triplets.insert(triplets.end(), triplets_private.begin(), triplets_private.end());
// }
//#pragma omp parallel for schedule(static) reduction(+:explicit_derivative)
// for (int i = cum_num_rand_eff_cluster_i[j]; i < cum_num_rand_eff_cluster_i[j + 1]; ++i) {
// explicit_derivative += SigmaI_mode[i] * mode_[i];
// }
explicit_derivative *= -0.5;
I_j.setFromTriplets(triplets.begin(), triplets.end());
I_j_ZtWZ = I_j * ZtWZ;
explicit_derivative += 0.5 * (SigmaI_plus_ZtWZ_inv.cwiseProduct(I_j_ZtWZ)).sum();
// calculate implicit derivative (through mode) of approx. marginal log-likelihood
d_mode_d_par = SigmaI_plus_ZtWZ_inv * I_j * Zt * first_deriv_ll_;
cov_grad[j] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
}
////Only for debugging
//Log::REInfo("CalcGradNegMargLikelihoodLAApproxGroupedRE");
//Log::REInfo("explicit_derivative: %g", explicit_derivative);
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
//}
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
//}
//Log::REInfo("cov_grad");
//for (int i = 0; i < num_comps; ++i) {
// Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]);
//}
}//end calc_cov_grad
// calculate gradient wrt fixed effects
if (calc_F_grad) {
vec_t d_detmll_d_F(num_data);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data; ++i) {
sp_mat_t zi_zit = Zt.col(i) * Z.row(i);//=Z.row(i) * (Z.row(i)).transpose()
d_detmll_d_F[i] = -0.5 * third_deriv[i] * (SigmaI_plus_ZtWZ_inv.cwiseProduct(zi_zit)).sum();
}
vec_t d_mll_d_modeT_SigmaI_plus_ZtWZ_inv_Zt_W = d_mll_d_mode.transpose() * SigmaI_plus_ZtWZ_inv * Zt * second_deriv_neg_ll_.asDiagonal();
fixed_effect_grad = -first_deriv_ll_ + d_detmll_d_F - d_mll_d_modeT_SigmaI_plus_ZtWZ_inv_Zt_W;
////Only for debugging
//Log::REInfo("CalcGradNegMargLikelihoodLAApproxGroupedRE");
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("fixed_effect_grad[%d]: %g", i, fixed_effect_grad[i]);
//}
}//end calc_F_grad
}//end CalcGradNegMargLikelihoodLAApproxGroupedRE
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* This version is used for the Laplace approximation when there are only grouped random effects with only one grouping variable.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param sigma2 Variance of random effects
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param calc_cov_grad If true, the gradient wrt the covariance parameters is calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F is calculated
* \param[out] cov_grad Gradient wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient wrt fixed effects F (note: this is passed as an Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated; otherwise the values in mode and a_vec_ are used (default=false)
*/
void CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const double sigma2,
const data_size_t* const random_effects_indices_of_data,
bool calc_cov_grad,
bool calc_F_grad,
double* cov_grad,
vec_t& fixed_effect_grad,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and diagonal of Sigma^-1 + Zt*W*Z at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(y_data, y_data_int, fixed_effects, num_data,
sigma2, random_effects_indices_of_data, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
// Initialize variables
vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects
if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]];
}
}
else {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
}
}
vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood
CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data());
// calculate gradient of approx. marginal likelihood wrt the mode
vec_t d_mll_d_mode = vec_t::Zero(num_re_);
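// Scatter-add with thread-private buffers: every thread accumulates into its own
// length-num_re_ copy, and the copies are merged in a critical section (the merge is
// cheap since num_re_ is typically much smaller than num_data). The same pattern is
// reused below for diag_ZtWZ and d_mode_d_par.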
#pragma omp parallel
{
vec_t third_deriv_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
third_deriv_private[random_effects_indices_of_data[i]] += third_deriv[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
d_mll_d_mode[i_re] += third_deriv_private[i_re];
}
}//end omp critical
}//end omp parallel
d_mll_d_mode.array() /= -2. * diag_SigmaI_plus_ZtWZ_.array();
// calculate gradient wrt covariance parameters
if (calc_cov_grad) {
vec_t diag_ZtWZ = vec_t::Zero(num_re_);
#pragma omp parallel
{
vec_t diag_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_ZtWZ[i_re] += diag_ZtWZ_private[i_re];
}
}//end omp critical
}//end omp parallel
double explicit_derivative = -0.5 * (mode_.array() * mode_.array()).sum() / sigma2 +
0.5 * (diag_ZtWZ.array() / diag_SigmaI_plus_ZtWZ_.array()).sum();
// calculate implicit derivative (through mode) of approx. marginal log-likelihood
vec_t d_mode_d_par = vec_t::Zero(num_re_);
#pragma omp parallel
{
vec_t first_deriv_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
first_deriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
d_mode_d_par[i_re] += first_deriv_private[i_re];
}
}//end omp critical
}//end omp parallel
d_mode_d_par.array() /= diag_SigmaI_plus_ZtWZ_.array();
cov_grad[0] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
////Only for debugging
//Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale");
//Log::REInfo("explicit_derivative: %g", explicit_derivative);
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
//}
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
//}
//Log::REInfo("cov_grad[0]: %g", cov_grad[0]);
}//end calc_cov_grad
// calculate gradient wrt fixed effects
if (calc_F_grad) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data; ++i) {
fixed_effect_grad[i] = -first_deriv_ll_[i] -
0.5 * third_deriv[i] / diag_SigmaI_plus_ZtWZ_[random_effects_indices_of_data[i]] - //=d_detmll_d_F
d_mll_d_mode[random_effects_indices_of_data[i]] * second_deriv_neg_ll_[i] / diag_SigmaI_plus_ZtWZ_[random_effects_indices_of_data[i]];//=implicit derivative = d_mll_d_mode * d_mode_d_F
}
////Only for debugging
//Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale");
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("fixed_effect_grad[%d]: %g", i, fixed_effect_grad[i]);
//}
}//end calc_F_grad
}//end CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done by factorizing ("inverting") (Sigma^-1 + W) where it is assumed that an approximate Cholesky factor
* of Sigma^-1 has previously been calculated using a Vecchia approximation.
* This version is used for the Laplace approximation when there are only GP random effects and the Vecchia approximation is used.
* Caveat: Sigma^-1 + W may not be very sparse
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param B Matrix B in Vecchia approximation Sigma^-1 = B^T D^-1 B ("=" Cholesky factor)
* \param D_inv Diagonal matrix D^-1 in Vecchia approximation Sigma^-1 = B^T D^-1 B
* \param B_grad Derivatives of matrices B ( = derivative of matrix -A) for Vecchia approximation
* \param D_grad Derivatives of matrices D for Vecchia approximation
* \param calc_cov_grad If true, the gradient wrt the covariance parameters is calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F is calculated
* \param[out] cov_grad Gradient of approximate marginal log-likelihood wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient of approximate marginal log-likelihood wrt fixed effects F (note: this is passed as an Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated; otherwise the values in mode and a_vec_ are used (default=false)
*/
void CalcGradNegMargLikelihoodLAApproxVecchia(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const sp_mat_t& B,
const sp_mat_t& D_inv,
const std::vector<sp_mat_t>& B_grad,
const std::vector<sp_mat_t>& D_grad,
bool calc_cov_grad,
bool calc_F_grad,
double* cov_grad,
vec_t& fixed_effect_grad,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + W at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLVecchia(y_data, y_data_int, fixed_effects, num_data, B, D_inv, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
// Initialize variables
bool no_fixed_effects = (fixed_effects == nullptr);
vec_t location_par;//location parameter = mode of random effects + fixed effects
vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood
if (no_fixed_effects) {
CalcThirdDerivLogLik(y_data, y_data_int, mode_.data(), num_data, third_deriv.data());
}
else {
location_par = vec_t(num_data);
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[i] + fixed_effects[i];
}
CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data());
}
// Calculate (Sigma^-1 + W)^-1
sp_mat_t Id(num_data, num_data);
Id.setIdentity();
sp_mat_t SigmaI_plus_W_inv = chol_fact_SigmaI_plus_ZtWZ_.solve(Id);
// calculate gradient of approx. marginal likelihood wrt the mode
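// Up to the sign convention for W, d log det(Sigma^-1 + W) / d mode_i = tr((Sigma^-1 + W)^-1 * dW/d mode_i);
// since W is diagonal, only the i-th diagonal entry of (Sigma^-1 + W)^-1 times third_deriv[i] remains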
vec_t d_mll_d_mode = -0.5 * (SigmaI_plus_W_inv.diagonal().array() * third_deriv.array()).matrix();
// calculate gradient wrt covariance parameters
if (calc_cov_grad) {
vec_t d_mode_d_par;//derivative of mode wrt a covariance parameter
double explicit_derivative;
int num_par = (int)B_grad.size();
sp_mat_t SigmaI_deriv;
sp_mat_t BgradT_Dinv_B;
sp_mat_t Bt_Dinv_Bgrad;
for (int j = 0; j < num_par; ++j) {
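// Derivative of Sigma^-1 = B^T * D^-1 * B wrt covariance parameter j (product rule, using d(D^-1) = -D^-1 * D_grad[j] * D^-1):
// SigmaI_deriv = B_grad[j]^T * D^-1 * B + B^T * D^-1 * B_grad[j] - B^T * D^-1 * D_grad[j] * D^-1 * B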
SigmaI_deriv = B_grad[j].transpose() * D_inv * B;
Bt_Dinv_Bgrad = SigmaI_deriv.transpose();
SigmaI_deriv += Bt_Dinv_Bgrad - B.transpose() * D_inv * D_grad[j] * D_inv * B;
d_mode_d_par = -SigmaI_plus_W_inv * SigmaI_deriv * mode_;
explicit_derivative = 0.5 * mode_.dot(SigmaI_deriv * mode_) +
0.5 * ((D_inv.diagonal().array() * D_grad[j].diagonal().array()).sum() + (SigmaI_deriv.cwiseProduct(SigmaI_plus_W_inv)).sum());
// Alternative version (not faster)
//vec_t u = D_inv * B * mode_;
//vec_t uk = B_grad[j] * mode_;
//explicit_derivative = uk.dot(u) - 0.5 * u.dot(D_grad[j] * u) +
// 0.5 * ((D_inv.diagonal().array() * D_grad[j].diagonal().array()).sum() + (SigmaI_deriv.cwiseProduct(SigmaI_plus_W_inv)).sum());
cov_grad[j] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
}
////Only for debugging
//Log::REInfo("explicit_derivative: %g", explicit_derivative);
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
//}
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
//}
//Log::REInfo("cov_grad");
//for (int i = 0; i < num_par; ++i) {
// Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]);
//}
}//end calc_cov_grad
// calculate gradient wrt fixed effects
if (calc_F_grad) {
vec_t impl_deriv = -d_mll_d_mode.transpose() * SigmaI_plus_W_inv * second_deriv_neg_ll_.asDiagonal();
fixed_effect_grad = -first_deriv_ll_ + d_mll_d_mode + impl_deriv;
}//end calc_F_grad
}//end CalcGradNegMargLikelihoodLAApproxVecchia
/*!
* \brief Make predictions for the (latent) random effects when using the Laplace approximation.
* Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt).
* In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id".
* This version is used for the Laplace approximation when dense matrices are used (e.g. GP models).
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param ZSigmaZt Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)")
* \param[out] pred_mean Predicted mean
* \param[out] pred_cov Predicted covariance matrix
* \param[out] pred_var Predicted variances
* \param calc_pred_cov If true, predictive covariance matrix is also calculated
* \param calc_pred_var If true, predictive variances are also calculated
* \param calc_mode If true, the mode of the random effects posterior is calculated; otherwise the values in mode and a_vec_ are used (default=false)
*/
template <typename T_mat>//T_mat can be either den_mat_t or sp_mat_t
void PredictLAApproxStable(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const std::shared_ptr<T_mat> ZSigmaZt,
const T_mat& Cross_Cov,
vec_t& pred_mean,
T_mat& pred_cov,
vec_t& pred_var,
bool calc_pred_cov = false,
bool calc_pred_var = false,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLStable<T_mat>(y_data, y_data_int, fixed_effects, num_data, ZSigmaZt, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
pred_mean = Cross_Cov * first_deriv_ll_;
if (calc_pred_cov || calc_pred_var) {
sp_mat_t Wsqrt(num_data, num_data);//diagonal matrix with square root of negative second derivatives on the diagonal (sqrt of negative Hessian of log-likelihood)
Wsqrt.setIdentity();
Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt();
T_mat L = chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL();
T_mat Maux, Maux2;
Maux = Wsqrt * Cross_Cov.transpose();
CalcLInvH(L, Maux, Maux2, true);//Maux2 = L\(Wsqrt * Cross_Cov^T)
if (calc_pred_cov) {
pred_cov -= Maux2.transpose() * Maux2;
}
if (calc_pred_var) {
Maux2 = Maux2.cwiseProduct(Maux2);
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] -= Maux2.col(i).sum();
}
}
}
////Only for debugging
//Log::REInfo("PredictLAApproxStable");
//for (int i = 0; i < 3; ++i) {
// Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i));
//}
//for (int i = 0; i < 3; ++i) {
// Log::REInfo("first_deriv_ll_[%d]: %g", i, first_deriv_ll_[i]);
//}
//for (int i = 0; i < 3; ++i) {
// Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]);
//}
//if (calc_pred_var) {
// for (int i = 0; i < 3; ++i) {
// Log::REInfo("pred_var[%d]: %g", i, pred_var[i]);
// }
//}
}//end PredictLAApproxStable
/*!
* \brief Make predictions for the (latent) random effects when using the Laplace approximation.
* Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt).
* In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id".
* This version is used for the Laplace approximation when dense matrices are used (e.g. GP models).
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param Sigma Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)")
* \param[out] pred_mean Predicted mean
* \param[out] pred_cov Predicted covariance matrix
* \param[out] pred_var Predicted variances
* \param calc_pred_cov If true, predictive covariance matrix is also calculated
* \param calc_pred_var If true, predictive variances are also calculated
* \param calc_mode If true, the mode of the random effects posterior is calculated; otherwise the values in mode and a_vec_ are used (default=false)
*/
template <typename T_mat>//T_mat can be either den_mat_t or sp_mat_t
void PredictLAApproxOnlyOneGPCalculationsOnREScale(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const std::shared_ptr<T_mat> Sigma,
const data_size_t* const random_effects_indices_of_data,
const T_mat& Cross_Cov,
vec_t& pred_mean,
T_mat& pred_cov,
vec_t& pred_var,
bool calc_pred_cov = false,
bool calc_pred_var = false,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + ZtWZsqrt * Sigma * ZtWZsqrt) at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale<T_mat>(y_data, y_data_int, fixed_effects,
num_data, Sigma, random_effects_indices_of_data, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
vec_t ZtFirstDeriv = vec_t::Zero(num_re_);//Z^T * first_deriv_ll_ (first derivatives of the log-likelihood aggregated per random effect)
#pragma omp parallel
{
vec_t ZtFirstDeriv_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
ZtFirstDeriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
ZtFirstDeriv[i_re] += ZtFirstDeriv_private[i_re];
}
}//end omp critical
}//end omp parallel
pred_mean = Cross_Cov * ZtFirstDeriv;
if (calc_pred_cov || calc_pred_var) {
vec_t diag_ZtWZ = vec_t::Zero(num_re_);
#pragma omp parallel
{
vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_ZtWZ.array()[i_re] += diag_sqrt_ZtWZ_private[i_re];
}
}//end omp critical
}//end omp parallel
sp_mat_t ZtWZsqrt(num_re_, num_re_);//diagonal matrix with square root of diagonal of ZtWZ
ZtWZsqrt.setIdentity();
ZtWZsqrt.diagonal().array() = diag_ZtWZ.array().sqrt();
T_mat L = chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL();
T_mat Maux, Maux2;
Maux = ZtWZsqrt * Cross_Cov.transpose();
CalcLInvH(L, Maux, Maux2, true);//Maux2 = L\(ZtWZsqrt * Cross_Cov^T)
if (calc_pred_cov) {
pred_cov -= Maux2.transpose() * Maux2;
}
if (calc_pred_var) {
Maux2 = Maux2.cwiseProduct(Maux2);
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] -= Maux2.col(i).sum();
}
}
}
////Only for debugging
//Log::REInfo("PredictLAApproxOnlyOneGPCalculationsOnREScale");
//for (int i = 0; i < 3; ++i) {
// if (Cross_Cov.rows() > 1) {
// Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i));
// }
// else {
// Log::REInfo("Cross_Cov[0,%d]: %g", i, Cross_Cov.coeff(0, i));
// }
//}
//for (int i = 0; i < 3; ++i) {
// Log::REInfo("ZtFirstDeriv[%d]: %g", i, ZtFirstDeriv[i]);
//}
//for (int i = 0; i < std::min((int)pred_mean.size(),3); ++i) {
// Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]);
//}
//if (calc_pred_var) {
// for (int i = 0; i < 3; ++i) {
// Log::REInfo("pred_var[%d]: %g", i, pred_var[i]);
// }
//}
}//end PredictLAApproxOnlyOneGPCalculationsOnREScale
/*!
* \brief Make predictions for the (latent) random effects when using the Laplace approximation.
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* NOTE: IT IS ASSUMED THAT SIGMA IS A DIAGONAL MATRIX
* This version is used for the Laplace approximation when there are only grouped random effects.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param SigmaI Inverse covariance matrix of latent random effect. Currently, this needs to be a diagonal matrix
* \param Zt Transpose Z^T of random effect design matrix that relates latent random effects to observations/likelihoods
* \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)")
* \param[out] pred_mean Predicted mean
* \param[out] pred_cov Predicted covariance matrix
* \param[out] pred_var Predicted variances
* \param calc_pred_cov If true, predictive covariance matrix is also calculated
* \param calc_pred_var If true, predictive variances are also calculated
* \param calc_mode If true, the mode of the random effects posterior is calculated; otherwise the values in mode and a_vec_ are used (default=false)
*/
template <typename T_mat>//T_mat can be either den_mat_t or sp_mat_t
void PredictLAApproxGroupedRE(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const sp_mat_t& SigmaI,
const sp_mat_t& Zt,
const T_mat& Cross_Cov,
vec_t& pred_mean,
T_mat& pred_cov,
vec_t& pred_var,
bool calc_pred_cov = false,
bool calc_pred_var = false,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + Zt*W*Z at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLGroupedRE(y_data, y_data_int, fixed_effects, num_data, SigmaI, Zt, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
pred_mean = Cross_Cov * first_deriv_ll_;
if (calc_pred_cov || calc_pred_var) {
T_mat Maux, Maux2;
Maux = Zt * second_deriv_neg_ll_.asDiagonal() * Cross_Cov.transpose();
// calculate Maux2 = L\(Z^T * second_deriv_neg_ll_.asDiagonal() * Cross_Cov^T)
T_mat L = chol_fact_SigmaI_plus_ZtWZ_.matrixL();
CalcLInvH(L, Maux, Maux2, true);
if (calc_pred_cov) {
pred_cov += Maux2.transpose() * Maux2 - (T_mat)(Cross_Cov * second_deriv_neg_ll_.asDiagonal() * Cross_Cov.transpose());
}
if (calc_pred_var) {
T_mat Maux3 = Cross_Cov.cwiseProduct(Cross_Cov * second_deriv_neg_ll_.asDiagonal());
Maux2 = Maux2.cwiseProduct(Maux2);
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] += Maux2.col(i).sum() - Maux3.row(i).sum();
}
}
}
////Only for debugging
//Log::REInfo("PredictLAApproxGroupedRE");
//for (int i = 0; i < 3; ++i) {
// if (Cross_Cov.rows() > 1) {
// Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i));
// }
// else {
// Log::REInfo("Cross_Cov[0,%d]: %g", i, Cross_Cov.coeff(0, i));
// }
//}
//for (int i = 0; i < 3; ++i) {
// Log::REInfo("first_deriv_ll_[%d]: %g", i, first_deriv_ll_[i]);
//}
//for (int i = 0; i < std::min((int)pred_mean.size(), 3); ++i) {
// Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]);
//}
//if (calc_pred_var) {
// for (int i = 0; i < 3; ++i) {
// Log::REInfo("pred_var[%d]: %g", i, pred_var[i]);
// }
//}
}//end PredictLAApproxGroupedRE
/*!
* \brief Make predictions for the (latent) random effects when using the Laplace approximation.
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* This version is used for the Laplace approximation when there are only grouped random effects with only one grouping variable.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param sigma2 Variance of random effects
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)")
* \param[out] pred_mean Predicted mean
* \param[out] pred_cov Predicted covariance matrix
* \param[out] pred_var Predicted variances
* \param calc_pred_cov If true, predictive covariance matrix is also calculated
* \param calc_pred_var If true, predictive variances are also calculated
* \param calc_mode If true, the mode of the random effects posterior is calculated; otherwise the values in mode and a_vec_ are used (default=false)
*/
template <typename T_mat>//T_mat can be either den_mat_t or sp_mat_t
void PredictLAApproxOnlyOneGroupedRECalculationsOnREScale(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const double sigma2,
const data_size_t* const random_effects_indices_of_data,
const T_mat& Cross_Cov,
vec_t& pred_mean,
T_mat& pred_cov,
vec_t& pred_var,
bool calc_pred_cov = false,
bool calc_pred_var = false,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and diagonal of Sigma^-1 + Zt*W*Z at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(y_data, y_data_int, fixed_effects, num_data,
sigma2, random_effects_indices_of_data, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
vec_t ZtFirstDeriv = vec_t::Zero(num_re_);//Z^T * first_deriv_ll_ (first derivatives of the log-likelihood aggregated per random effect)
#pragma omp parallel
{
vec_t ZtFirstDeriv_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
ZtFirstDeriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
ZtFirstDeriv[i_re] += ZtFirstDeriv_private[i_re];
}
}//end omp critical
}//end omp parallel
pred_mean = Cross_Cov * ZtFirstDeriv;
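// The in-place operations below compute, with Sigma = sigma2 * Id,
// diag_Sigma_plus_ZtWZI = diagonal of Sigma^-1 * (Sigma^-1 + Zt*W*Z)^-1 * Sigma^-1 - Sigma^-1
//                       = 1. / (sigma2 * sigma2 * diag_SigmaI_plus_ZtWZ_) - 1. / sigma2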
vec_t diag_Sigma_plus_ZtWZI = vec_t(num_re_);
diag_Sigma_plus_ZtWZI.array() = 1. / diag_SigmaI_plus_ZtWZ_.array();
diag_Sigma_plus_ZtWZI.array() /= sigma2;
diag_Sigma_plus_ZtWZI.array() -= 1.;
diag_Sigma_plus_ZtWZI.array() /= sigma2;
if (calc_pred_cov) {
T_mat Maux = Cross_Cov * diag_Sigma_plus_ZtWZI.asDiagonal() * Cross_Cov.transpose();
pred_cov += Maux;
}
if (calc_pred_var) {
T_mat Maux = Cross_Cov * diag_Sigma_plus_ZtWZI.asDiagonal();
T_mat Maux2 = Cross_Cov.cwiseProduct(Maux);
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] += Maux2.row(i).sum();
}
}
////Only for debugging
//Log::REInfo("PredictLAApproxOnlyOneGroupedRECalculationsOnREScale");
//for (int i = 0; i < 3; ++i) {
// if (Cross_Cov.rows() > 1) {
// Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i));
// }
// else {
// Log::REInfo("Cross_Cov[0,%d]: %g", i, Cross_Cov.coeff(0, i));
// }
//}
//for (int i = 0; i < 3; ++i) {
// Log::REInfo("ZtFirstDeriv[%d]: %g", i, ZtFirstDeriv[i]);
//}
//for (int i = 0; i < std::min((int)pred_mean.size(),3); ++i) {
// Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]);
//}
//if (calc_pred_var) {
// for (int i = 0; i < 3; ++i) {
// Log::REInfo("pred_var[%d]: %g", i, pred_var[i]);
// }
//}
}//end PredictLAApproxOnlyOneGroupedRECalculationsOnREScale
/*!
* \brief Make predictions for the (latent) random effects when using the Laplace approximation.
* Calculations are done by factorizing ("inverting") (Sigma^-1 + W) where it is assumed that an approximate Cholesky factor
* of Sigma^-1 has previously been calculated using a Vecchia approximation.
* This version is used for the Laplace approximation when there are only GP random effects and the Vecchia approximation is used.
* Caveat: Sigma^-1 + W may not be very sparse
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param B Matrix B in Vecchia approximation Sigma^-1 = B^T D^-1 B ("=" Cholesky factor)
* \param D_inv Diagonal matrix D^-1 in Vecchia approximation Sigma^-1 = B^T D^-1 B
* \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)")
* \param[out] pred_mean Predicted mean
* \param[out] pred_cov Predicted covariance matrix
* \param[out] pred_var Predicted variances
* \param calc_pred_cov If true, predictive covariance matrix is also calculated
* \param calc_pred_var If true, predictive variances are also calculated
* \param calc_mode If true, the mode of the random effects posterior is calculated; otherwise the values in mode and a_vec_ are used (default=false)
*/
template <typename T_mat>//T_mat can be either den_mat_t or sp_mat_t
void PredictLAApproxVecchia(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const sp_mat_t& B,
const sp_mat_t& D_inv,
const T_mat& Cross_Cov,
vec_t& pred_mean,
T_mat& pred_cov,
vec_t& pred_var,
bool calc_pred_cov = false,
bool calc_pred_var = false,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + W at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLVecchia(y_data, y_data_int, fixed_effects, num_data, B, D_inv, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
pred_mean = Cross_Cov * first_deriv_ll_;
if (calc_pred_cov || calc_pred_var) {
T_mat SigmaI_CrossCovT = B.transpose() * D_inv * B * Cross_Cov.transpose();
// calculate Maux = L\(Sigma^-1 * Cross_Cov^T), L = Chol(Sigma^-1 + W)
T_mat Maux;
sp_mat_t L = chol_fact_SigmaI_plus_ZtWZ_.matrixL();
CalcLInvH(L, SigmaI_CrossCovT, Maux, true);
if (calc_pred_cov) {
pred_cov += -Cross_Cov * SigmaI_CrossCovT + Maux.transpose() * Maux;
}
if (calc_pred_var) {
Maux = Maux.cwiseProduct(Maux);
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] += Maux.col(i).sum() - (Cross_Cov.row(i)).dot(SigmaI_CrossCovT.col(i));
}
}
}
}//end PredictLAApproxVecchia
/*!
* \brief Make predictions for the response variable (label) based on predictions for the mean and variance of the latent random effects
* \param[out] pred_mean Predicted mean of latent random effects. The predicted mean of the response variable is written onto this
* \param[out] pred_var Predicted variances of latent random effects. The predicted variance of the response variable is written onto this
* \param predict_var If true, predictive variances are also calculated
*/
void PredictResponse(vec_t& pred_mean, vec_t& pred_var, bool predict_var = false) {
if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_mean[i] = normalCDF(pred_mean[i] / std::sqrt(1. + pred_var[i]));
}
if (predict_var) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] = pred_mean[i] * (1. - pred_mean[i]);
}
}
}
else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_mean[i] = RespMeanAdaptiveGHQuadrature(pred_mean[i], pred_var[i]);
}
if (predict_var) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] = pred_mean[i] * (1. - pred_mean[i]);
}
}
}
else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
double pm = RespMeanAdaptiveGHQuadrature(pred_mean[i], pred_var[i]);
if (predict_var) {
double psm = RespMeanAdaptiveGHQuadrature(2 * pred_mean[i], 4 * pred_var[i]);
pred_var[i] = psm - pm * pm + pm;
}
pred_mean[i] = pm;
}
}
else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
double pm = RespMeanAdaptiveGHQuadrature(pred_mean[i], pred_var[i]);
if (predict_var) {
double psm = RespMeanAdaptiveGHQuadrature(2 * pred_mean[i], 4 * pred_var[i]);
pred_var[i] = psm - pm * pm + psm / aux_pars_[0];
}
pred_mean[i] = pm;
}
}
}
/*!
* \brief Adaptive GH quadrature to calculate predictive mean of response variable
* \param latent_mean Predicted mean of latent random effects
* \param latent_var Predicted variances of latent random effects
*/
double RespMeanAdaptiveGHQuadrature(const double latent_mean, const double latent_var) {
// Find mode of integrand
double mode_integrand, mode_integrand_last, update;
mode_integrand = 0.;
double sigma2_inv = 1. / latent_var;
double sqrt_sigma2_inv = std::sqrt(sigma2_inv);
for (int it = 0; it < 100; ++it) {
mode_integrand_last = mode_integrand;
update = (FirstDerivLogCondMeanLikelihood(mode_integrand) - sigma2_inv * (mode_integrand - latent_mean))
/ (SecondDerivLogCondMeanLikelihood(mode_integrand) - sigma2_inv);
mode_integrand -= update;
if (std::abs(update) / std::abs(mode_integrand_last) < DELTA_REL_CONV_) {
break;
}
}
// Adaptive GH quadrature
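// E[m(x)] with x ~ N(latent_mean, latent_var) is approximated by
//   sqrt(2) * sigma_hat * sum_j w_j * exp(t_j^2) * m(x_j) * N(x_j | latent_mean, latent_var),
// where x_j = mode_integrand + sqrt(2) * sigma_hat * t_j, sigma_hat^2 = -1 / (second derivative of the
// log-integrand at the mode), t_j are the GH nodes, and exp(t_j^2) * w_j are the adaptive_GH_weights_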
double sqrt2_sigma_hat = M_SQRT2 / std::sqrt(-SecondDerivLogCondMeanLikelihood(mode_integrand) + sigma2_inv);
double x_val;
double mean_resp = 0.;
for (int j = 0; j < order_GH_; ++j) {
x_val = sqrt2_sigma_hat * GH_nodes_[j] + mode_integrand;
mean_resp += adaptive_GH_weights_[j] * CondMeanLikelihood(x_val) * normalPDF(sqrt_sigma2_inv * (x_val - latent_mean));
}
mean_resp *= sqrt2_sigma_hat * sqrt_sigma2_inv;
return mean_resp;
////non-adaptive GH quadrature
//double mean_resp = 0.;
//double sigma = std::sqrt(latent_var);
//for (int j = 0; j < order_GH_; ++j) {
// mean_resp += GH_weights_[j] * CondMeanLikelihood(M_SQRT2 * sigma * GH_nodes_[j] + latent_mean);
//}
//mean_resp *= M_1_SQRTPI_;
}
template <typename T>//T can be double or float
bool AreSame(const T a, const T b) const {
return std::fabs(a - b) < std::fabs(a) * EPSILON_;
}
// Used for likelihood_type_ == "bernoulli_probit"
inline double normalCDF(double value) const {
return 0.5 * std::erfc(-value * M_SQRT1_2);
}
inline double normalPDF(double value) const {
return std::exp(-value * value / 2) / M_SQRT2PI_;
//return std::exp(-value * value / 2) / std::sqrt(2 * M_PI);
}
private:
/*! \brief Number of data points */
data_size_t num_data_;
/*! \brief Number (dimension) of random effects */
data_size_t num_re_;
/*! \brief Posterior mode used for Laplace approximation */
vec_t mode_;
/*! \brief Posterior mode used for Laplace approximation: saving a previously found value allows for resetting the mode when the step size is too large. */
vec_t mode_previous_value_;
/*! \brief Auxiliary variable a=ZSigmaZt^-1 mode_b used for Laplace approximation */
vec_t a_vec_;
/*! \brief First derivatives of the log-likelihood */
vec_t first_deriv_ll_;
/*! \brief Second derivatives of the negative log-likelihood (diagonal of matrix "W") */
vec_t second_deriv_neg_ll_;
/*! \brief Diagonal of matrix Sigma^-1 + Zt * W * Z in Laplace approximation (used only in the version for grouped REs with a single grouping variable, where ZtWZ is diagonal; otherwise 'chol_fact_SigmaI_plus_ZtWZ_' is used for grouped REs) */
vec_t diag_SigmaI_plus_ZtWZ_;
/*! \brief Cholesky factors of matrix Sigma^-1 + Zt * W * Z in Laplace approximation (used only in versions 'Vecchia' and 'GroupedRE'. For grouped REs, this is used if there is more than one random effect) */
chol_sp_mat_t chol_fact_SigmaI_plus_ZtWZ_;
/*!
* \brief Cholesky factors of matrix B = I + Wsqrt * Z * Sigma * Zt * Wsqrt in Laplace approximation (for version 'Stable')
* or of matrix B = Id + ZtWZsqrt * Sigma * ZtWZsqrt (for version 'OnlyOneGPCalculationsOnREScale')
*/
T_chol chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_;
/*! \brief If true, the mode has been initialized to 0 */
bool mode_initialized_ = false;
/*! \brief If true, the mode has been determined */
bool mode_has_been_calculated_ = false;
/*! \brief If true, the normalizing constant of the likelihood has been calculated */
bool normalizing_constant_has_been_calculated_ = false;
/*! \brief Normalizing constant for likelihoods (not all likelihoods have one) */
double log_normalizing_constant_;
/*! \brief Type of likelihood */
string_t likelihood_type_ = "gaussian";
/*! \brief List of supported likelihoods */
const std::set<string_t> SUPPORTED_LIKELIHOODS_{ "gaussian", "bernoulli_probit", "bernoulli_logit", "poisson", "gamma" };
/*! \brief Tolerance level when comparing two doubles for equality */
double EPSILON_ = 1e-6;
/*! \brief Maximal number of iteration done for finding posterior mode with Newton's method */
int MAXIT_MODE_NEWTON_ = 1000;
/*! \brief Used for checking convergence in mode finding algorithm (terminate if relative change in Laplace approx. is below this value) */
double DELTA_REL_CONV_ = 1e-6;
/*! \brief Additional parameters for likelihoods. For gamma, aux_pars_[0] = shape parameter */
std::vector<double> aux_pars_;
string_t ParseLikelihoodAlias(const string_t& likelihood) {
if (likelihood == string_t("binary") || likelihood == string_t("bernoulli_probit") || likelihood == string_t("binary_probit")) {
return "bernoulli_probit";
}
else if (likelihood == string_t("gaussian") || likelihood == string_t("regression")) {
return "gaussian";
}
return likelihood;
}
//Derived constants not defined in cmath
//sqrt(2*pi)
const double M_SQRT2PI_ = std::sqrt(2. * M_PI);
////1/sqrt(pi) (not used anymore, used for non-adaptive GH quadrature)
//const double M_1_SQRTPI_ = M_2_SQRTPI / 2.;
/*! \brief Order of the Gauss-Hermite quadrature */
int order_GH_ = 30;
/*! \brief Nodes and weights for the Gauss-Hermite quadrature */
// Source: https://keisan.casio.com/exec/system/1281195844
const std::vector<double> GH_nodes_ = { -6.863345293529891581061,
-6.138279220123934620395,
-5.533147151567495725118,
-4.988918968589943944486,
-4.48305535709251834189,
-4.003908603861228815228,
-3.544443873155349886925,
-3.099970529586441748689,
-2.667132124535617200571,
-2.243391467761504072473,
-1.826741143603688038836,
-1.415527800198188511941,
-1.008338271046723461805,
-0.6039210586255523077782,
-0.2011285765488714855458,
0.2011285765488714855458,
0.6039210586255523077782,
1.008338271046723461805,
1.415527800198188511941,
1.826741143603688038836,
2.243391467761504072473,
2.667132124535617200571,
3.099970529586441748689,
3.544443873155349886925,
4.003908603861228815228,
4.48305535709251834189,
4.988918968589943944486,
5.533147151567495725118,
6.138279220123934620395,
6.863345293529891581061 };
const std::vector<double> GH_weights_ = { 2.908254700131226229411E-21,
2.8103336027509037088E-17,
2.87860708054870606219E-14,
8.106186297463044204E-12,
9.1785804243785282085E-10,
5.10852245077594627739E-8,
1.57909488732471028835E-6,
2.9387252289229876415E-5,
3.48310124318685523421E-4,
0.00273792247306765846299,
0.0147038297048266835153,
0.0551441768702342511681,
0.1467358475408900997517,
0.2801309308392126674135,
0.386394889541813862556,
0.3863948895418138625556,
0.2801309308392126674135,
0.1467358475408900997517,
0.0551441768702342511681,
0.01470382970482668351528,
0.002737922473067658462989,
3.48310124318685523421E-4,
2.938725228922987641501E-5,
1.579094887324710288346E-6,
5.1085224507759462774E-8,
9.1785804243785282085E-10,
8.10618629746304420399E-12,
2.87860708054870606219E-14,
2.81033360275090370876E-17,
2.9082547001312262294E-21 };
const std::vector<double> adaptive_GH_weights_ = { 0.83424747101276179534,
0.64909798155426670071,
0.56940269194964050397,
0.52252568933135454964,
0.491057995832882696506,
0.46837481256472881677,
0.45132103599118862129,
0.438177022652683703695,
0.4279180629327437485828,
0.4198950037368240886418,
0.413679363611138937184,
0.4089815750035316024972,
0.4056051233256844363121,
0.403419816924804022553,
0.402346066701902927115,
0.4023460667019029271154,
0.4034198169248040225528,
0.4056051233256844363121,
0.4089815750035316024972,
0.413679363611138937184,
0.4198950037368240886418,
0.427918062932743748583,
0.4381770226526837037,
0.45132103599118862129,
0.46837481256472881677,
0.4910579958328826965056,
0.52252568933135454964,
0.56940269194964050397,
0.64909798155426670071,
0.83424747101276179534 };
};
} // namespace GPBoost
#endif // GPB_LIKELIHOODS_
|
DRB110-ordered-orig-no-omp.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "omprace.h"
#include <omp.h>
#include <assert.h>
#include <stdio.h>
/* This is a program based on a test contributed by Yizi Gu@Rice Univ.
* Proper use of the ordered directive and clause, no data races
* */
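/* Why there is no race: the ordered construct serializes the enclosed block in
 * loop-iteration order, so the increment of x is executed by one thread at a time. */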
int main()
{
//omprace_init();
int x =0;
#pragma omp parallel for ordered num_threads(2)
for (int i = 0; i < 10; ++i) {
#pragma omp ordered
{
printf("i = %d\n", i);
x++;
}
}
assert (x==10);
printf ("x=%d\n",x);
//omprace_fini();
return 0;
}
|
layerramdistancetransform.h | /*********************************************************************************
*
* Inviwo - Interactive Visualization Workshop
*
* Copyright (c) 2017-2019 Inviwo Foundation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*********************************************************************************/
#ifndef IVW_LAYERRAMDISTANCETRANSFORM_H
#define IVW_LAYERRAMDISTANCETRANSFORM_H
#include <modules/base/basemoduledefine.h>
#include <inviwo/core/common/inviwo.h>
#include <inviwo/core/util/indexmapper.h>
#include <inviwo/core/datastructures/image/layer.h>
#include <inviwo/core/datastructures/image/layerram.h>
#include <inviwo/core/datastructures/image/layerramprecision.h>
#ifndef __clang__
#include <omp.h>
#endif
namespace inviwo {
namespace util {
/**
* Implementation of Euclidean Distance Transform according to Saito's algorithm:
* T. Saito and J.I. Toriwaki. New algorithms for Euclidean distance transformations
* of an n-dimensional digitized picture with applications. Pattern Recognition, 27(11).
* pp. 1551-1565, 1994.
* http://www.cs.jhu.edu/~misha/ReadingSeminar/Papers/Saito94.pdf
*
* Calculates the distance in basis matrix space
* * Predicate is a function of type (const T &value) -> bool that decides whether a value in the
* input is a "feature".
* * ValueTransform is a function of type (const U& squaredDist) -> U that is applied to all
* squared distance values at the end of the calculation.
* * ProgressCallback is a function of type (double progress) -> void that is called with a value
* from 0 to 1 to indicate the progress of the calculation.
*/
template <typename T, typename U, typename Predicate, typename ValueTransform,
typename ProgressCallback>
void layerRAMDistanceTransform(const LayerRAMPrecision<T> *inLayer,
LayerRAMPrecision<U> *outDistanceField, const Matrix<2, U> basis,
const size2_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback);
template <typename T, typename U>
void layerRAMDistanceTransform(const LayerRAMPrecision<T> *inVolume,
LayerRAMPrecision<U> *outDistanceField, const Matrix<2, U> basis,
const size2_t upsample);
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
const size2_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback);
template <typename U, typename ProgressCallback>
void layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
const size2_t upsample, double threshold, bool normalize, bool flip,
bool square, double scale, ProgressCallback callback);
template <typename U>
void layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
const size2_t upsample, double threshold, bool normalize, bool flip,
bool square, double scale);
} // namespace util
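// Example usage (a minimal sketch; the layer objects and the 0.5 threshold are hypothetical
// and would come from the calling processor):
//
//   const LayerRAMPrecision<float>* in = ...;  // input layer representation
//   LayerRAMPrecision<float>* out = ...;       // preallocated, dimensions == input dimensions * upsample
//   util::layerRAMDistanceTransform(
//       in, out, Matrix<2, float>{1.0f}, size2_t{1, 1},
//       [](const float& v) { return v > 0.5f; },        // predicate: what counts as a feature
//       [](const float& d2) { return std::sqrt(d2); },  // turn squared distances into distances
//       [](double) {});                                 // progress callback (ignored here)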
template <typename T, typename U, typename Predicate, typename ValueTransform,
typename ProgressCallback>
void util::layerRAMDistanceTransform(const LayerRAMPrecision<T> *inLayer,
LayerRAMPrecision<U> *outDistanceField,
const Matrix<2, U> basis, const size2_t upsample,
Predicate predicate, ValueTransform valueTransform,
ProgressCallback callback) {
#ifndef __clang__
omp_set_num_threads(std::thread::hardware_concurrency());
#endif
using int64 = glm::int64;
using i64vec2 = glm::tvec2<int64>;
auto square = [](auto a) { return a * a; };
callback(0.0);
const T *src = inLayer->getDataTyped();
U *dst = outDistanceField->getDataTyped();
const i64vec2 srcDim{inLayer->getDimensions()};
const i64vec2 dstDim{outDistanceField->getDimensions()};
const i64vec2 sm{upsample};
const auto squareBasis = glm::transpose(basis) * basis;
const Vector<2, U> squareBasisDiag{squareBasis[0][0], squareBasis[1][1]};
const Vector<2, U> squareVoxelSize{squareBasisDiag / Vector<2, U>{dstDim * dstDim}};
const Vector<2, U> invSquareVoxelSize{Vector<2, U>{1.0f} / squareVoxelSize};
{
const auto maxdist = glm::compMax(squareBasisDiag);
bool orthogonal = true;
for (size_t i = 0; i < squareBasis.length(); i++) {
for (size_t j = 0; j < squareBasis.length(); j++) {
if (i != j) {
if (std::abs(squareBasis[i][j]) > 10.0e-8 * maxdist) {
orthogonal = false;
break;
}
}
}
}
if (!orthogonal) {
LogWarnCustom(
"layerRAMDistanceTransform",
"Calculating the distance transform on a non-orthogonal layer will not give "
"correct values");
}
}
if (srcDim * sm != dstDim) {
throw Exception(
"DistanceTransformRAM: Dimensions does not match src = " + toString(srcDim) +
" dst = " + toString(dstDim) + " scaling = " + toString(sm),
IVW_CONTEXT_CUSTOM("layerRAMDistanceTransform"));
}
util::IndexMapper<2, int64> srcInd(srcDim);
util::IndexMapper<2, int64> dstInd(dstDim);
auto is_feature = [&](const int64 x, const int64 y) {
return predicate(src[srcInd(x / sm.x, y / sm.y)]);
};
// first pass, forward and backward scan along x
// result: min distance in x direction
#pragma omp parallel for
for (int64 y = 0; y < dstDim.y; ++y) {
// forward
U dist = static_cast<U>(dstDim.x);
for (int64 x = 0; x < dstDim.x; ++x) {
if (!is_feature(x, y)) {
++dist;
} else {
dist = U(0);
}
dst[dstInd(x, y)] = squareVoxelSize.x * square(dist);
}
// backward
dist = static_cast<U>(dstDim.x);
for (int64 x = dstDim.x - 1; x >= 0; --x) {
if (!is_feature(x, y)) {
++dist;
} else {
dist = U(0);
}
dst[dstInd(x, y)] = std::min<U>(dst[dstInd(x, y)], squareVoxelSize.x * square(dist));
}
}
// second pass, scan y direction
// for each voxel v(x,y,z) find min_i(data(x,i,z) + (y - i)^2), 0 <= i < dimY
// result: min distance in x and y direction
callback(0.45);
#pragma omp parallel
{
std::vector<U> buff;
buff.resize(dstDim.y);
#pragma omp for
for (int64 x = 0; x < dstDim.x; ++x) {
// cache column data into temporary buffer
for (int64 y = 0; y < dstDim.y; ++y) {
buff[y] = dst[dstInd(x, y)];
}
for (int64 y = 0; y < dstDim.y; ++y) {
auto d = buff[y];
if (d != U(0)) {
const auto rMax = static_cast<int64>(std::sqrt(d * invSquareVoxelSize.y)) + 1;
const auto rStart = std::min(rMax, y - 1);
const auto rEnd = std::min(rMax, dstDim.y - y);
for (int64 n = -rStart; n < rEnd; ++n) {
const auto w = buff[y + n] + squareVoxelSize.y * square(n);
if (w < d) d = w;
}
}
dst[dstInd(x, y)] = d;
}
}
}
// scale data
callback(0.9);
const int64 layerSize = dstDim.x * dstDim.y;
#pragma omp parallel for
for (int64 i = 0; i < layerSize; ++i) {
dst[i] = valueTransform(dst[i]);
}
callback(1.0);
}
template <typename T, typename U>
void util::layerRAMDistanceTransform(const LayerRAMPrecision<T> *inLayer,
LayerRAMPrecision<U> *outDistanceField,
const Matrix<2, U> basis, const size2_t upsample) {
util::layerRAMDistanceTransform(
inLayer, outDistanceField, basis, upsample,
[](const T &val) { return util::glm_convert_normalized<double>(val) > 0.5; },
[](const U &squareDist) {
return static_cast<U>(std::sqrt(static_cast<double>(squareDist)));
},
[](double f) {});
}
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void util::layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
const size2_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback) {
const auto inputLayerRep = inLayer->getRepresentation<LayerRAM>();
inputLayerRep->dispatch<void, dispatching::filter::Scalars>([&](const auto lrprecision) {
layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(), upsample,
predicate, valueTransform, callback);
});
}
template <typename U, typename ProgressCallback>
void util::layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
const size2_t upsample, double threshold, bool normalize,
bool flip, bool square, double scale, ProgressCallback progress) {
const auto inputLayerRep = inLayer->getRepresentation<LayerRAM>();
inputLayerRep->dispatch<void, dispatching::filter::Scalars>([&](const auto lrprecision) {
using ValueType = util::PrecisionValueType<decltype(lrprecision)>;
const auto predicateIn = [threshold](const ValueType &val) { return val < threshold; };
const auto predicateOut = [threshold](const ValueType &val) { return val > threshold; };
const auto normPredicateIn = [threshold](const ValueType &val) {
return util::glm_convert_normalized<double>(val) < threshold;
};
const auto normPredicateOut = [threshold](const ValueType &val) {
return util::glm_convert_normalized<double>(val) > threshold;
};
const auto valTransIdent = [scale](const float &squareDist) {
return static_cast<float>(scale * squareDist);
};
const auto valTransSqrt = [scale](const float &squareDist) {
return static_cast<float>(scale * std::sqrt(squareDist));
};
if (normalize && square && flip) {
util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
upsample, normPredicateIn, valTransIdent, progress);
} else if (normalize && square && !flip) {
util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
upsample, normPredicateOut, valTransIdent, progress);
} else if (normalize && !square && flip) {
util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
upsample, normPredicateIn, valTransSqrt, progress);
} else if (normalize && !square && !flip) {
util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
upsample, normPredicateOut, valTransSqrt, progress);
} else if (!normalize && square && flip) {
util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
upsample, predicateIn, valTransIdent, progress);
} else if (!normalize && square && !flip) {
util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
upsample, predicateOut, valTransIdent, progress);
} else if (!normalize && !square && flip) {
util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
upsample, predicateIn, valTransSqrt, progress);
} else if (!normalize && !square && !flip) {
util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
upsample, predicateOut, valTransSqrt, progress);
}
});
}
template <typename U>
void util::layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
const size2_t upsample, double threshold, bool normalize,
bool flip, bool square, double scale) {
util::layerDistanceTransform(inLayer, outDistanceField, upsample, threshold, normalize, flip,
square, scale, [](double) {});
}
} // namespace inviwo
#endif // IVW_LAYERRAMDISTANCETRANSFORM_H
|
hermv_c_bsr_u_lo_trans.c | #include<string.h>
#ifdef _OPENMP
#include<omp.h>
#endif
#include"alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_BSR *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
const ALPHA_INT m = A->rows * A->block_size;
const ALPHA_INT n = A->cols * A->block_size;
const ALPHA_INT bs = A->block_size;
const ALPHA_INT bs2 = bs * bs;
// assert(m==n);
ALPHA_INT b_rows = A->rows;
ALPHA_INT b_cols = A->cols;
if (b_rows != b_cols)
return ALPHA_SPARSE_STATUS_INVALID_VALUE;
ALPHA_INT thread_num = alpha_get_thread_num();
ALPHA_INT partition[thread_num + 1];
balanced_partition_row_by_nnz(A->rows_end, b_rows, thread_num, partition);
ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num);
#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
{
const ALPHA_INT tid = alpha_get_thread_id();
const ALPHA_INT local_m_s = partition[tid];
const ALPHA_INT local_m_e = partition[tid + 1];
tmp[tid] = (ALPHA_Number *)malloc(sizeof(ALPHA_Number) * b_rows * bs);
memset(tmp[tid], 0, sizeof(ALPHA_Number) * b_rows * bs);
if (A->block_layout == ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
{
for (ALPHA_INT br = local_m_s; br < local_m_e; br++)
{
ALPHA_INT row = br * bs;
ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
ALPHA_INT lower_end = alpha_upper_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
for (ALPHA_INT ai = block_start; ai < lower_end; ai++)
{
ALPHA_INT bc = A->col_indx[ai];
ALPHA_INT col = bc * bs;
ALPHA_INT a0_idx = ai * bs2;
// diagonal block containing diagonal entry
if (bc == br)
{
for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
{
//tmp[tid][b_row + row] += alpha*A->values[a0_idx + (b_row + 1) * bs]*x[col + b_col];
for (ALPHA_INT b_col = 0; b_col < b_row; b_col++)
{
alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
}
}
}
else
{
for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
{
for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
{
alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
}
}
}
}
}
}
else if (A->block_layout == ALPHA_SPARSE_LAYOUT_COLUMN_MAJOR)
{
for (ALPHA_INT br = local_m_s; br < local_m_e; br++)
{
ALPHA_INT row = br * bs;
ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
ALPHA_INT lower_end = alpha_upper_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
for (ALPHA_INT ai = block_start; ai < lower_end; ai++)
{
ALPHA_INT bc = A->col_indx[ai];
ALPHA_INT col = bc * bs;
ALPHA_INT a0_idx = ai * bs2;
// diagonal block containing diagonal entry
if (bc == br)
{
for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
{
for (ALPHA_INT b_row = b_col + 1; b_row < bs; b_row++)
{
alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
}
}
}
else
{
for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
{
for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
{
alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
}
}
}
}
}
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < b_cols * bs; ++i)
{
ALPHA_Number tmp_y;
alpha_setzero(tmp_y);
for (ALPHA_INT j = 0; j < thread_num; ++j)
{
alpha_add(tmp_y, tmp_y, tmp[j][i]);
//tmp_y += tmp[j][i];
}
alpha_mul(y[i], y[i], beta);
alpha_madde(y[i], x[i], alpha);
alpha_madde(y[i], tmp_y, alpha);
//y[i] = y[i]*beta + tmp_y*alpha;
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < thread_num; ++i)
{
free(tmp[i]);
}
free(tmp);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
saturnin-differential-trail-weight-three-rounds.c | #include "saturnin-common.h"
#include "../global-common.h"
#include "../convolution.h"
#include <math.h>
#include <stdbool.h>
#include <omp.h>
#define NUMBER_OF_ELEMENTS (1 << BOX_WIDTH)
#define DT_SIZE (NUMBER_OF_ELEMENTS*NUMBER_OF_ELEMENTS)
#define NUM_THREADS 36
#define BOUND 36
static int difference_table_even[DT_SIZE];
static int difference_table_odd[DT_SIZE];
/*
* Count number of input differences equal to v in column corresponding to output difference b
*/
void
count_input(mpz_t c, const uint16_t b, const int v, int index)
{
mpz_set_ui(c, 0);
for (int i = 0; i < 16; i++) {
if ((index % 2 == 0) && difference_table_even[16*i+b] == v) {
mpz_add_ui(c, c, 1);
}
if ((index % 2 != 0) && difference_table_odd[16*i+b] == v) {
mpz_add_ui(c, c, 1);
}
}
}
/*
* Count number of output differences equal to v in row corresponding to input difference a
*/
void
count_output(mpz_t c, const uint16_t a, const int v, int index)
{
mpz_set_ui(c, 0);
for (int i = 0; i < 16; i++) {
if ((index % 2 == 0) && difference_table_even[16*a+i] == v) {
mpz_add_ui(c, c, 1);
}
if ((index % 2 != 0) && difference_table_odd[16*a+i] == v) {
mpz_add_ui(c, c, 1);
}
}
}
/*
* b is an encoding of a 4x4 rectangle of bits as a flat 16-bit array where (i, j) is mapped to (4*i+j).
* This function swaps entries i and j.
*/
uint16_t
swap(const uint16_t b, const unsigned long i, const unsigned long j)
{
uint16_t x = ((b >> i) ^ (b >> j)) & 1U; // XOR temporary
return b ^ ((x << i) | (x << j));
}
/*
* An activity pattern is represented by a 4x4 rectangle where the position of an activity bit corresponding to a nibble with a given index
* is as follows:
* 3 2 1 0
* 6 5 4 7
* 9 8 11 10
* 12 15 14 13
* For example, the activity bit of a nibble with index 4 is in position (1,2) in the rectangle.
* The actual entries consist of 0's and 1's, indicating whether a nibble is active or not.
* In this representation a row in the rectangle corresponds to the input of mix columns and a column to
* the input of the inverse of mix columns.
*
* The rectangle is stored as 16 bits, where (i, j) is mapped to (4*i+j)
*
* This function converts this representation to the sequential one:
* 0 1 2 3
* 4 5 6 7
* 8 9 10 11
* 12 13 14 15
*/
uint16_t
convert(const uint16_t b)
{
uint16_t r = swap(b, 0, 3);
r = swap(r, 1, 2);
r = swap(r, 4, 6);
r = swap(r, 8, 9);
r = swap(r, 10, 11);
return swap(r, 13, 15);
}
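/*
 * Hedged sanity check (added for illustration; the function name is ours and
 * it is not called anywhere): with the mapping above, nibble 4's activity
 * bit sits at rectangle position (1,2), i.e. bit 6, and nibble 0's at
 * position (0,3), i.e. bit 3, so convert() must move them to bits 4 and 0.
 */
#include <assert.h>
static void convert_sanity_check(void)
{
    assert(convert((uint16_t)(1U << 6)) == (1U << 4)); /* nibble 4 */
    assert(convert((uint16_t)(1U << 3)) == (1U << 0)); /* nibble 0 */
}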
int
main(void)
{
// We know that the number of candidate differences is 349, because we used the code below
// to compute and print them.
uint16_t candidate_differences[349];
// Consider a single slice, consisting of 16 nibbles.
// Associated with this are 2^16 possible activity patterns. Each activity pattern is represented by a rectangle as
// described above where rows correspond to mix columns input and columns to inverse mix columns input.
// We step through each activity pattern.
for (long x = 0, k = 0; x <= UINT16_MAX; x++) {
long min_box_weight = hamming_weight16(x);
// Consider each column
for (long j = 0; j < 4; j++) {
long active = 0;
for (long i = 0; i < 4; i++) {
if ((x & (1U << (4*i+j))) != 0) {
++active;
}
}
if (active != 0) {
// Mix columns ensures at least 5 active nibbles
min_box_weight += 5-active;
}
}
// Consider each row
for (long i = 0; i < 4; i++) {
long active = 0;
for (long j = 0; j < 4; j++) {
if ((x & (1U << (4*i+j))) != 0) {
++active;
}
}
if (active != 0) {
// Mix columns inverse ensures at least 5 active nibbles
min_box_weight += 5-active;
}
}
// Each active nibble contributes at least a weight of 2, so we check whether this lower bound is below
// the upper bound that we set.
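// Worked example: a pattern with a single active nibble has hamming weight 1
// and picks up (5-1) from its column and (5-1) from its row, so
// min_box_weight = 9 and the minimum trail weight is 2*9 = 18 <= BOUND.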
if (min_box_weight <= BOUND/2) {
// convert to sequential representation.
candidate_differences[k] = convert(x);
k++;
}
}
fill_difference_table(difference_table_even, sbox_even, BOX_WIDTH);
fill_difference_table(difference_table_odd, sbox_odd, BOX_WIDTH);
Table_fixed *total_weight = table_fixed_create(1+BOUND);
Table_fixed *weights[349];
for (long i = 0; i < 349; i++) {
weights[i] = table_fixed_create(1+BOUND);
}
// The number of iterations of the main loop is bounded from above by 2^9
omp_set_num_threads(NUM_THREADS);
#pragma omp parallel for schedule(dynamic)
// Consider each candidate difference
for (long i = 0; i < 349; i++) {
mpz_t val;
mpz_init(val);
uint16_t x = candidate_differences[i];
int box_weight_a = hamming_weight16(x);
// The number of iterations of this loop is bounded from above by 2^16,
// since the number of active nibbles w satisfies w <= 4
// Step over all possible differences after the second Sbox-layer,
// having box-activity pattern equal to x
for (uint64_t a = 0; a < (1ULL << (4 * box_weight_a)); a++) {
if (box_weight(a, box_weight_a, 4) != box_weight_a) {
continue;
}
// Build the difference before the third Sbox-layer
uint64_t state1 = 0;
for (long j = 0, m = 0; j < 16 && m < box_weight_a; j++) {
if ((x & (1U << j)) != 0) {
state1 |= ((a >> 4*m) & 0xf) << 4*j;
m++;
}
}
state1 = permute_slice(state1);
state1 = mix_columns_slice(state1);
state1 = permute_slice_inverse(state1);
// Compute the convolution of the histograms of the third Sbox-layer
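// Each per-nibble table maps differential weight -> number of compatible
// differences: a transition count of 16 has weight log2(16/16) = 0, a count
// of 4 has weight 2, and a count of 2 has weight 3; these are the only
// nonzero counts the tables below track.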
Table_fixed *bases[32];
for (long j = 16; j < 32; j++) {
bases[j] = table_fixed_create(4);
}
for (long j = 0; j < 16; j++) {
count_output(val, (state1 >> 4*j) & 0xf, 16, j);
table_fixed_insert_and_merge(bases[16+j], 0, val, &mpz_add);
count_output(val, (state1 >> 4*j) & 0xf, 4, j);
table_fixed_insert_and_merge(bases[16+j], 2, val, &mpz_add);
count_output(val, (state1 >> 4*j) & 0xf, 2, j);
table_fixed_insert_and_merge(bases[16+j], 3, val, &mpz_add);
}
Table_fixed *convolved_state1 = table_fixed_copy(bases[16]);
for (long j = 17; j < 32; j++) {
Table_fixed *tmp = convolve_fixed(convolved_state1, bases[j], 0, BOUND);
table_fixed_destroy(convolved_state1);
convolved_state1 = tmp;
}
// For a fixed difference AFTER the second Sbox-layer, consider all the differences BEFORE the second Sbox-layer,
// the number of which is bounded from above by 2^12 (empirically verified)
for (uint64_t b = 0; b < (1ULL << (4 * box_weight_a)); b++) {
bool valid = true;
long differential_weight_middle = 0;
for (long j = 0, m = 0; j < 16 && m < box_weight_a; j++) {
if ((x & (1U << j)) != 0) {
long row_index = (b >> 4*m) & 0xf;
long column_index = (a >> 4*m) & 0xf;
long count;
if (j % 2 == 0) {
count = difference_table_even[16*row_index+column_index];
} else {
count = difference_table_odd[16*row_index+column_index];
}
if (count == 0) {
valid = false;
break;
}
differential_weight_middle += log2((double) NUMBER_OF_ELEMENTS / count);
m++;
}
}
if (valid) {
uint64_t state0 = 0;
for (long j = 0, m = 0; j < 16 && m < box_weight_a; j++) {
if ((x & (1U << j)) != 0) {
state0 |= ((b >> 4*m) & 0xf) << 4*j;
m++;
}
}
state0 = mix_columns_slice_inverse(state0);
// Filter anything that will exceed the bound to save us the work of doing the convolution
if (box_weight_a + box_weight(state0, 16, BOX_WIDTH) + box_weight(state1, 16, BOX_WIDTH) > BOUND/2) {
continue;
}
// at this point, we can do the convolution
// use b1 and b2 to determine active sboxes and
// convolve
for (long j = 0; j < 16; j++) {
/* differential weight 0, 2, and 3 */
bases[j] = table_fixed_create(4);
}
for (long j = 0; j < 16; j++) {
count_input(val, (state0 >> 4*j) & 0xf, 16, j);
table_fixed_insert_and_merge(bases[j], 0, val, &mpz_add);
count_input(val, (state0 >> 4*j) & 0xf, 4, j);
table_fixed_insert_and_merge(bases[j], 2, val, &mpz_add);
count_input(val, (state0 >> 4*j) & 0xf, 2, j);
table_fixed_insert_and_merge(bases[j], 3, val, &mpz_add);
}
Table_fixed *convolved_state_total = table_fixed_copy(convolved_state1);
for (long j = 0; j < 16; j++) {
Table_fixed *tmp = convolve_fixed(convolved_state_total, bases[j], 0, BOUND-differential_weight_middle);
table_fixed_destroy(convolved_state_total);
convolved_state_total = tmp;
}
for (size_t j = 0; j <= BOUND-differential_weight_middle; j++) {
table_fixed_insert_and_merge(weights[i],
j+differential_weight_middle,
convolved_state_total->head[j],
&mpz_add);
}
for (long j = 0; j < 16; j++) {
table_fixed_destroy(bases[j]);
}
table_fixed_destroy(convolved_state_total);
}
}
for (long j = 16; j < 32; j++) {
table_fixed_destroy(bases[j]);
}
table_fixed_destroy(convolved_state1);
}
mpz_clear(val);
char filename[60];
snprintf(filename, sizeof(filename), "data/saturnin-differential-trail-three-rounds_%04x.txt", x);
table_fixed_print(filename, weights[i]);
}
for (long i = 0; i <= BOUND; i++) {
for (long j = 0; j < 349; j++) {
table_fixed_insert_and_merge(total_weight, i, weights[j]->head[i], &mpz_add);
}
}
table_fixed_print("data/saturnin-differential-trail-three-rounds_total.txt", total_weight);
for (long i = 0; i < 349; i++) {
table_fixed_destroy(weights[i]);
}
table_fixed_destroy(total_weight);
return 0;
}
|
pi-omp1.c | /*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file exercise7.c
* @author Alessandro Capotondi
* @date 27 Mar 2020
* @brief Exercise 8
*
* Pi calculation
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
/**
* @brief EX 8- Pi Calculation
*
* This program computes pi as
* \pi = 4 arctan(1)
* = 4 \int _0 ^1 \frac{1} {1 + x^2} dx
*
* @return void
*/
#include <stdio.h>
#include <math.h>
#include <omp.h>
#include "utils.h"
#if !defined(ITERS)
#define ITERS (4)
#endif
#define NSTEPS 134217728
void exercise(){
long i;
double dx = 1.0 / NSTEPS;
double pi = 0.0;
double start_time = omp_get_wtime();
#pragma omp parallel for
for (i = 0; i < NSTEPS; i++)
{
double x = (i + 0.5) * dx;
#pragma omp critical
pi += 1.0 / (1.0 + x * x);
}
pi *= 4.0 * dx;
double run_time = omp_get_wtime() - start_time;
double ref_pi = 4.0 * atan(1.0);
printf("pi with %d steps is %.10f in %.6f seconds (error=%e)\n",
NSTEPS, pi, run_time, fabs(ref_pi - pi));
}
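/*
 * Hedged alternative sketch (not part of the original exercise): the same
 * integral computed with an OpenMP reduction instead of a critical section,
 * removing the per-iteration serialization. The name exercise_reduction is
 * introduced here for illustration only.
 */
void exercise_reduction(void)
{
    double dx = 1.0 / NSTEPS;
    double pi = 0.0;
    double start_time = omp_get_wtime();
    #pragma omp parallel for reduction(+ : pi)
    for (long i = 0; i < NSTEPS; i++)
    {
        double x = (i + 0.5) * dx;
        pi += 1.0 / (1.0 + x * x);
    }
    pi *= 4.0 * dx;
    double run_time = omp_get_wtime() - start_time;
    printf("reduction: pi with %d steps is %.10f in %.6f seconds\n",
           NSTEPS, pi, run_time);
}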
int
main(int argc, char** argv)
{
for(int i=0; i<ITERS; i++){
printf("\n\n");
printf("============================\n");
printf("Test - Iteration %d...\n", i);
printf("============================\n");
start_stats();
exercise();
collect_stats();
}
printf("\n\n");
printf("============================\n");
printf("Statistics\n");
printf("============================\n");
print_stats();
return 0;
}
|
GB_binop__le_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_int32)
// A*D function (colscale): GB (_AxD__le_int32)
// D*A function (rowscale): GB (_DxB__le_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__le_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__le_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_int32)
// C=scalar+B GB (_bind1st__le_int32)
// C=scalar+B' GB (_bind1st_tran__le_int32)
// C=A+scalar GB (_bind2nd__le_int32)
// C=A'+scalar GB (_bind2nd_tran__le_int32)
// C type: bool
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_INT32 || GxB_NO_LE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__le_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__le_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__le_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__le_int32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__le_int32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__le_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__le_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__le_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__le_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__le_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__le_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__le_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__le_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__le_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
depend-1.c | /* { dg-do compile } */
/* { dg-options "-fopenmp" } */
extern int a[][10], a2[][10];
int b[10], c[10][2], d[10], e[10], f[10];
int b2[10], c2[10][2], d2[10], e2[10], f2[10];
int k[10], l[10], m[10], n[10], o;
int *p;
void bar (void);
int t[10];
#pragma omp threadprivate (t)
void
foo (int g[3][10], int h[4][8], int i[2][10], int j[][9],
int g2[3][10], int h2[4][8], int i2[2][10], int j2[][9])
{
#pragma omp task depend(in: bar[2:5]) /* { dg-error "is not a variable" } */
;
#pragma omp task depend(out: t[2:5])
;
#pragma omp task depend(inout: k[0.5:]) /* { dg-error "low bound \[^\n\r]* of array section does not have integral type" } */
;
#pragma omp task depend(in: l[:7.5f]) /* { dg-error "length \[^\n\r]* of array section does not have integral type" } */
;
#pragma omp task depend(out: m[p:]) /* { dg-error "low bound \[^\n\r]* of array section does not have integral type" } */
;
#pragma omp task depend(inout: n[:p]) /* { dg-error "length \[^\n\r]* of array section does not have integral type" } */
;
#pragma omp task depend(in: o[2:5]) /* { dg-error "does not have pointer or array type" } */
;
#pragma omp task depend(out: a[:][2:4]) /* { dg-error "array type length expression must be specified" } */
;
#pragma omp task depend(inout: b[-1:]) /* { dg-error "negative low bound in array section" } */
;
#pragma omp task depend(inout: c[:-3][1:1]) /* { dg-error "negative length in array section" } */
;
#pragma omp task depend(in: d[11:]) /* { dg-error "low bound \[^\n\r]* above array section size" } */
;
#pragma omp task depend(out: e[:11]) /* { dg-error "length \[^\n\r]* above array section size" } */
;
#pragma omp task depend(out: f[1:10]) /* { dg-error "high bound \[^\n\r]* above array section size" } */
;
#pragma omp task depend(in: g[:][2:4]) /* { dg-error "for pointer type length expression must be specified" } */
;
#pragma omp task depend(in: h[2:2][-1:]) /* { dg-error "negative low bound in array section" } */
;
#pragma omp task depend(inout: h[:1][:-3]) /* { dg-error "negative length in array section" } */
;
#pragma omp task depend(out: i[:1][11:]) /* { dg-error "low bound \[^\n\r]* above array section size" } */
;
#pragma omp task depend(in: j[3:4][:10]) /* { dg-error "length \[^\n\r]* above array section size" } */
;
#pragma omp task depend(out: j[30:10][5:5]) /* { dg-error "high bound \[^\n\r]* above array section size" } */
;
#pragma omp task depend(out: a2[:3][2:4])
;
#pragma omp task depend(inout: b2[0:])
;
#pragma omp task depend(inout: c2[:3][1:1])
;
#pragma omp task depend(in: d2[9:])
;
#pragma omp task depend(out: e2[:10])
;
#pragma omp task depend(out: f2[1:9])
;
#pragma omp task depend(in: g2[:2][2:4])
;
#pragma omp task depend(in: h2[2:2][0:])
;
#pragma omp task depend(inout: h2[:1][:3])
;
#pragma omp task depend(out: i2[:1][9:])
;
#pragma omp task depend(in: j2[3:4][:9])
;
#pragma omp task depend(out: j2[30:10][5:4])
;
}
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 32;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
simd-8.c | /* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
#include <stdlib.h>
#include <math.h>
#define EPS 0.005
int P[1000];
float A[1000];
float do_work(float *arr)
{
float pri;
#pragma omp simd lastprivate(pri)
for (int i = 0; i < 999; ++i)
{
int j = P[i];
pri = 0.5f;
if (j % 2 == 0)
{
pri = A[j+1] + arr[i];
}
A[j] = pri * 1.5f;
pri = pri + A[j];
}
return pri;
}
int main(void)
{
float pri, arr[1000], diff;
for (int i = 0; i < 1000; ++i)
{
P[i] = i;
A[i] = i * 1.5f;
arr[i] = i * 1.8f;
}
pri = do_work(&arr[0]);
diff = pri - 8237.25;
if (diff > EPS || -diff > EPS)
abort ();
return 0;
}
|
bindthreads.c | #define _GNU_SOURCE
#include <mpi.h>
#include <pthread.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <errno.h>
#include <string.h>
#include <numa.h>
#define MAX_CPUS_PER_NODE 176
#define POWER9 0x4e // witherspoon
/* Processor Version Register */
#define SPRN_PVR 0x11F
/* Version field */
#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF)
int get_pvr(void)
{
unsigned long ret;
int msr, pvr;
asm volatile("mfspr %0,%1" : "=r" (ret): "i" (SPRN_PVR));
msr = (int) ret;
pvr = PVR_VER(msr);
return pvr;
}
//===========================================================================
// This routine will bind threads only if the env variable BIND_THREADS=yes.
// This version uses a simple cpulist : 0,1,2,..., max_cpus - 1.
// default : spread out MPI ranks and threads evenly
// options : BIND_STRIDE=number ... stride per process not per thread
// BIND_SKIP=number of logical cpus to skip
// BIND_CPU_LIST="cpu1,cpu2,cpu3,...,cpuN" a specific list
//===========================================================================
#pragma weak bindthreads_=bindthreads
void bindthreads(int *ppn, int *lrank)
{
char * ptr;
int envlist[MAX_CPUS_PER_NODE];
int uselist[MAX_CPUS_PER_NODE];
int socket0_cpus[MAX_CPUS_PER_NODE];
int socket1_cpus[MAX_CPUS_PER_NODE];
int cpu, max_cpus, inc, ndx, socket;
int i, j, k, myrank, nranks, tid, skip;
int max_ranks_per_node, my_base_ndx, cpus_per_rank, verbose;
int nthreads, rc, use_envlist, badlist, processor_version, smt_width;
int available_cpus_per_socket, cpus_per_socket;
int system_cores, available_cores_per_socket, socket1_base_cpu;
int half_smt_width, num_halfs, cpu_inc;
char * list_ptr;
char delimiters[] = {", "};
pthread_t thread;
cpu_set_t cpuset;
struct bitmask * cpus;
char * snames, * rnames, host[80];
int bind_threads, match, color, local_rank;
int ranks_per_node, ranks_per_socket;
MPI_Comm local_comm;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
MPI_Comm_size(MPI_COMM_WORLD, &nranks);
bind_threads = 0;
ptr = getenv("BIND_THREADS");
if (ptr != NULL ) {
if (strncasecmp(ptr,"yes",3) == 0) bind_threads = 1;
}
verbose = 0;
ptr = getenv("BIND_VERBOSE");
if (ptr != NULL) {
if (strncasecmp(ptr,"yes",3) == 0) verbose = 1;
}
ptr = getenv("SYSTEM_CORES");
if (ptr != NULL) system_cores = atoi(ptr);
else system_cores = 2;
// make a communicator of all ranks on this node
snames = (char *) malloc(nranks*sizeof(host));
rnames = (char *) malloc(nranks*sizeof(host));
gethostname(host, sizeof(host));
for (i=0; i<sizeof(host); i++) {
if (host[i] == '.') {
host[i] = '\0';
break;
}
}
for (i=0; i<nranks; i++) {
ptr = snames + i*sizeof(host);
strncpy(ptr, host, sizeof(host));
}
MPI_Alltoall(snames, sizeof(host), MPI_BYTE,
rnames, sizeof(host), MPI_BYTE, MPI_COMM_WORLD);
color = 0;
match = 0;
for (i=0; i<nranks; i++) {
ptr = rnames + i*sizeof(host);
if (strcmp(host, ptr) == 0) {
match++;
if (match == 1) color = i;
}
}
MPI_Comm_split(MPI_COMM_WORLD, color, myrank, &local_comm);
MPI_Comm_rank(local_comm, &local_rank);
MPI_Comm_size(local_comm, &ranks_per_node);
*ppn = ranks_per_node;
*lrank = local_rank;
// do not return until local_rank and ranks_per_node are set
if (bind_threads == 0) {
if (myrank==0) fprintf(stderr,"bindthreads: not binding because BIND_THREADS is not set to yes.\n");
return;
}
ranks_per_socket = ranks_per_node / 2;
socket = local_rank / ranks_per_socket;
MPI_Allreduce(&ranks_per_node, &max_ranks_per_node, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
// max_cpus = max #cpus on the system
max_cpus = sysconf(_SC_NPROCESSORS_ONLN);
processor_version = get_pvr();
if (processor_version != POWER9) {
if (myrank==0) fprintf(stderr,"bindthreads: not binding because the processor is not power9.\n");
return;
}
j = 0;
// Power9 socket 0 is numa node 0
cpus = numa_allocate_cpumask();
rc = numa_node_to_cpus(0, cpus);
if (rc >= 0) {
for (i = 0; i < cpus->size; i++)
if (numa_bitmask_isbitset(cpus, i)) socket0_cpus[j++] = i;
}
j = 0;
// Power9 socket 1 is numa node 8
numa_free_cpumask(cpus); // release the socket-0 mask before allocating the socket-1 mask
cpus = numa_allocate_cpumask();
rc = numa_node_to_cpus(8, cpus);
if (rc >= 0) {
for (i = 0; i < cpus->size; i++)
if (numa_bitmask_isbitset(cpus, i)) socket1_cpus[j++] = i;
}
cpus_per_socket = j;
socket1_base_cpu = socket1_cpus[0];
k = 0;
for (i = 0; i < cpus_per_socket; i++) {
if ( socket0_cpus[i] < (socket1_base_cpu - 2*system_cores) ) k++;
}
available_cpus_per_socket = k;
// find max available OpenMP threads
#ifdef _OPENMP
nthreads = omp_get_max_threads();
#else
nthreads = 1;
#endif
cpus_per_rank = (2*available_cpus_per_socket) / max_ranks_per_node;
use_envlist = 0;
list_ptr = getenv("BIND_CPU_LIST");
if (list_ptr != NULL) {
k = 0;
ptr = strtok(list_ptr, delimiters);
while(ptr != NULL) {
envlist[k] = atoi(ptr);
ptr = strtok(NULL, delimiters);
k++;
}
if (k != max_ranks_per_node*nthreads) {
if (myrank == 0) fprintf(stderr, "bindthreads: BIND_CPU_LIST requires ranks_per_node*nthreads items ... using defaults\n");
use_envlist = 0;
}
else {
badlist = 0;
for (k=0; k<max_ranks_per_node*nthreads; k++) {
if (envlist[k] < 0 || envlist[k] >= max_cpus) badlist = 1;
}
if (badlist) {
if (myrank == 0) fprintf(stderr, "bindthreads: BIND_CPU_LIST must have 0<=cpu<max_cpus ... using defaults\n");
use_envlist = 0;
}
else use_envlist = 1;
}
if (use_envlist == 1) {
k = 0;
for (j=0; j<nthreads; j++) {
ndx = j + local_rank*nthreads;
uselist[k] = envlist[ndx];
k++;
}
goto setaffinity;
}
}
else {
ptr = getenv("BIND_STRIDE");
if (ptr != NULL) cpus_per_rank = atoi(ptr);
ptr = getenv("BIND_SKIP");
if (ptr != NULL) {
skip = atoi(ptr);
}
else skip = 0;
inc = cpus_per_rank / nthreads;
if (socket == 0) {
my_base_ndx = skip + cpus_per_rank * local_rank;
k = 0;
for (i=0; i<nthreads; i++) {
ndx = my_base_ndx + i*inc;
uselist[k] = socket0_cpus[ndx];
k++;
}
}
else {
my_base_ndx = skip + cpus_per_rank * (local_rank - ranks_per_socket);
k = 0;
for (i=0; i<nthreads; i++) {
ndx = my_base_ndx + i*inc;
uselist[k] = socket1_cpus[ndx];
k++;
}
}
}
setaffinity:
// set affinity for OpenMP threads
#ifdef _OPENMP
#pragma omp parallel private(tid,thread,cpu,cpuset,rc)
{
#pragma omp critical
{
tid = omp_get_thread_num();
#else
tid = 0;
#endif
thread = pthread_self();
cpu = uselist[tid];
CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset);
rc = pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
if (rc == 0 && verbose) {
fprintf(stderr, "bindthreads: binding MPI rank %d thread %d to cpu %d\n", myrank, tid, cpu);
}
if (rc != 0) {
fprintf(stderr, "bindthreads: pthread_set_affinity_np failed for MPI rank %d, thread %d, cpu %d on host %s \n", myrank, tid, cpu, host);
}
#ifdef _OPENMP
}
}
#endif
return;
}
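/*
 * Hedged usage sketch (not part of the original file): how a caller might
 * drive bindthreads() from an MPI program. The demo guard and variable names
 * are ours; run with e.g. BIND_THREADS=yes BIND_VERBOSE=yes.
 */
#ifdef BINDTHREADS_DEMO
int main(int argc, char **argv)
{
    int ranks_on_node, rank_on_node;
    MPI_Init(&argc, &argv);
    bindthreads(&ranks_on_node, &rank_on_node);
    printf("local rank %d of %d ranks on this node\n", rank_on_node, ranks_on_node);
    MPI_Finalize();
    return 0;
}
#endif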
|
point_outlier.h | /****************************************************************************
* VCGLib o o *
* Visual and Computer Graphics Library o o *
* _ O _ *
* Copyright(C) 2004-2016 \/)\/ *
* Visual Computing Lab /\/| *
* ISTI - Italian National Research Council | *
* \ *
* All rights reserved. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License (http://www.gnu.org/licenses/gpl.txt) *
* for more details. *
* *
****************************************************************************/
#ifndef VCG_TRI_OUTLIERS__H
#define VCG_TRI_OUTLIERS__H
#include <vcg/space/index/kdtree/kdtree.h>
#include <algorithm>
namespace vcg
{
namespace tri
{
template <class MeshType>
class OutlierRemoval
{
public:
typedef typename MeshType::ScalarType ScalarType;
typedef typename vcg::KdTree<ScalarType> KdTreeType;
typedef typename vcg::KdTree<ScalarType>::PriorityQueue PriorityQueue;
/**
Compute an outlier probability value for each vertex of the mesh using the approach
in the paper "LoOP: Local Outlier Probabilities". The outlier probability is stored in the
vertex attribute "outlierScore". It uses the input kdtree to find the k nearest neighbors of each vertex.
"LoOP: local outlier probabilities" by Hans-Peter Kriegel et al.
Proceedings of the 18th ACM conference on Information and knowledge management
*/
static void ComputeLoOPScore(MeshType& mesh, KdTreeType& kdTree, int kNearest)
{
vcg::tri::RequireCompactness(mesh);
typename MeshType::template PerVertexAttributeHandle<ScalarType> outlierScore = tri::Allocator<MeshType>:: template GetPerVertexAttribute<ScalarType>(mesh, std::string("outlierScore"));
typename MeshType::template PerVertexAttributeHandle<ScalarType> sigma = tri::Allocator<MeshType>:: template GetPerVertexAttribute<ScalarType>(mesh, std::string("sigma"));
typename MeshType::template PerVertexAttributeHandle<ScalarType> plof = tri::Allocator<MeshType>:: template GetPerVertexAttribute<ScalarType>(mesh, std::string("plof"));
#pragma omp parallel for schedule(dynamic, 10)
for (size_t i = 0; i < mesh.vert.size(); i++)
{
PriorityQueue queue;
kdTree.doQueryK(mesh.vert[i].cP(), kNearest, queue);
ScalarType sum = 0;
for (int j = 0; j < queue.getNofElements(); j++)
sum += queue.getWeight(j);
sum /= (queue.getNofElements());
sigma[i] = sqrt(sum);
}
float mean = 0;
#pragma omp parallel for reduction(+: mean) schedule(dynamic, 10)
for (size_t i = 0; i < mesh.vert.size(); i++)
{
PriorityQueue queue;
kdTree.doQueryK(mesh.vert[i].cP(), kNearest, queue);
ScalarType sum = 0;
for (int j = 0; j < queue.getNofElements(); j++)
sum += sigma[queue.getIndex(j)];
sum /= (queue.getNofElements());
plof[i] = sigma[i] / sum - 1.0f;
mean += plof[i] * plof[i];
}
mean /= mesh.vert.size();
mean = sqrt(mean);
#pragma omp parallel for schedule(dynamic, 10)
for (size_t i = 0; i < mesh.vert.size(); i++)
{
ScalarType value = plof[i] / (mean * sqrt(2.0f));
double dem = 1.0 + 0.278393 * value;
dem += 0.230389 * value * value;
dem += 0.000972 * value * value * value;
dem += 0.078108 * value * value * value * value;
ScalarType op = std::max(0.0, 1.0 - 1.0 / dem);
outlierScore[i] = op;
}
tri::Allocator<MeshType>::DeletePerVertexAttribute(mesh, std::string("sigma"));
tri::Allocator<MeshType>::DeletePerVertexAttribute(mesh, std::string("plof"));
};
/**
Select all the vertex of the mesh with an outlier probability above the input threshold [0.0, 1.0].
*/
static int SelectLoOPOutliers(MeshType& mesh, KdTreeType& kdTree, int kNearest, float threshold)
{
ComputeLoOPScore(mesh, kdTree, kNearest);
int count = 0;
typename MeshType:: template PerVertexAttributeHandle<ScalarType> outlierScore = tri::Allocator<MeshType>::template GetPerVertexAttribute<ScalarType>(mesh, std::string("outlierScore"));
for (int i = 0; i < mesh.vert.size(); i++)
{
if (outlierScore[i] > threshold)
{
mesh.vert[i].SetS();
count++;
}
}
return count;
}
/**
Delete all the vertex of the mesh with an outlier probability above the input threshold [0.0, 1.0].
*/
static int DeleteLoOPOutliers(MeshType& m, KdTreeType& kdTree, int kNearest, float threshold)
{
SelectLoOPOutliers(m,kdTree,kNearest,threshold);
int ovn = m.vn;
for(typename MeshType::VertexIterator vi=m.vert.begin();vi!=m.vert.end();++vi)
if((*vi).IsS() ) tri::Allocator<MeshType>::DeleteVertex(m,*vi);
tri::Allocator<MeshType>::CompactVertexVector(m);
tri::Allocator<MeshType>::DeletePerVertexAttribute(m, std::string("outlierScore"));
return m.vn - ovn;
}
};
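/*
 * Hedged usage sketch (illustrative, not part of the original header): the
 * caller is assumed to have built a kd-tree over the mesh vertices
 * (construction elided); the function name and the defaults of 32 neighbors
 * and a 0.8 probability threshold are ours.
 */
template <class MeshType>
inline int RemoveLoOPOutliersSketch(MeshType &mesh,
                                    vcg::KdTree<typename MeshType::ScalarType> &tree)
{
    return OutlierRemoval<MeshType>::DeleteLoOPOutliers(mesh, tree, 32, 0.8f);
}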
} // end namespace tri
} // end namespace vcg
#endif // VCG_TRI_OUTLIERS__H
|